Loading the data

carbonData<-read.csv('/Users/angadsingh/Downloads/Carbon Emission.csv')
summary(carbonData)
  Body.Type             Sex                Diet           How.Often.Shower   Heating.Energy.Source  Transport         Vehicle.Type       Social.Activity   
 Length:10000       Length:10000       Length:10000       Length:10000       Length:10000          Length:10000       Length:10000       Length:10000      
 Class :character   Class :character   Class :character   Class :character   Class :character      Class :character   Class :character   Class :character  
 Mode  :character   Mode  :character   Mode  :character   Mode  :character   Mode  :character      Mode  :character   Mode  :character   Mode  :character  
                                                                                                                                                           
                                                                                                                                                           
                                                                                                                                                           
 Monthly.Grocery.Bill Frequency.of.Traveling.by.Air Vehicle.Monthly.Distance.Km Waste.Bag.Size     Waste.Bag.Weekly.Count How.Long.TV.PC.Daily.Hour
 Min.   : 50.0        Length:10000                  Min.   :   0                Length:10000       Min.   :1.000          Min.   : 0.00            
 1st Qu.:111.0        Class :character              1st Qu.:  69                Class :character   1st Qu.:2.000          1st Qu.: 6.00            
 Median :173.0        Mode  :character              Median : 823                Mode  :character   Median :4.000          Median :12.00            
 Mean   :173.9                                      Mean   :2031                                   Mean   :4.025          Mean   :12.14            
 3rd Qu.:237.0                                      3rd Qu.:2517                                   3rd Qu.:6.000          3rd Qu.:18.00            
 Max.   :299.0                                      Max.   :9999                                   Max.   :7.000          Max.   :24.00            
 How.Many.New.Clothes.Monthly How.Long.Internet.Daily.Hour Energy.efficiency   Recycling         Cooking_With       CarbonEmission
 Min.   : 0.00                Min.   : 0.00                Length:10000       Length:10000       Length:10000       Min.   : 306  
 1st Qu.:13.00                1st Qu.: 6.00                Class :character   Class :character   Class :character   1st Qu.:1538  
 Median :25.00                Median :12.00                Mode  :character   Mode  :character   Mode  :character   Median :2080  
 Mean   :25.11                Mean   :11.89                                                                         Mean   :2269  
 3rd Qu.:38.00                3rd Qu.:18.00                                                                         3rd Qu.:2768  
 Max.   :50.00                Max.   :24.00                                                                         Max.   :8377  
str(carbonData)
'data.frame':   10000 obs. of  20 variables:
 $ Body.Type                    : chr  "overweight" "obese" "overweight" "overweight" ...
 $ Sex                          : chr  "female" "female" "male" "male" ...
 $ Diet                         : chr  "pescatarian" "vegetarian" "omnivore" "omnivore" ...
 $ How.Often.Shower             : chr  "daily" "less frequently" "more frequently" "twice a day" ...
 $ Heating.Energy.Source        : chr  "coal" "natural gas" "wood" "wood" ...
 $ Transport                    : chr  "public" "walk/bicycle" "private" "walk/bicycle" ...
 $ Vehicle.Type                 : chr  "" "" "petrol" "" ...
 $ Social.Activity              : chr  "often" "often" "never" "sometimes" ...
 $ Monthly.Grocery.Bill         : int  230 114 138 157 266 144 56 59 200 135 ...
 $ Frequency.of.Traveling.by.Air: chr  "frequently" "rarely" "never" "rarely" ...
 $ Vehicle.Monthly.Distance.Km  : int  210 9 2472 74 8457 658 5363 54 1376 440 ...
 $ Waste.Bag.Size               : chr  "large" "extra large" "small" "medium" ...
 $ Waste.Bag.Weekly.Count       : int  4 3 1 3 1 1 4 3 3 1 ...
 $ How.Long.TV.PC.Daily.Hour    : int  7 9 14 20 3 22 9 5 3 8 ...
 $ How.Many.New.Clothes.Monthly : int  26 38 47 5 5 18 11 39 31 23 ...
 $ How.Long.Internet.Daily.Hour : int  1 5 6 7 6 9 19 15 15 18 ...
 $ Energy.efficiency            : chr  "No" "No" "Sometimes" "Sometimes" ...
 $ Recycling                    : chr  "['Metal']" "['Metal']" "['Metal']" "['Paper', 'Plastic', 'Glass', 'Metal']" ...
 $ Cooking_With                 : chr  "['Stove', 'Oven']" "['Stove', 'Microwave']" "['Oven', 'Microwave']" "['Microwave', 'Grill', 'Airfryer']" ...
 $ CarbonEmission               : int  2238 1892 2595 1074 4743 1647 1832 2322 2494 1178 ...

From the str() output I can see that Vehicle.Type contains empty strings (""). These are exactly the rows where Transport is public or walk/bicycle, i.e. respondents without a private vehicle, so rather than leaving blanks I recode them into a single 'FuelEfficient' level.


# blank Vehicle.Type occurs only for non-private transport, so recode those rows
carbonData$Vehicle.Type[carbonData$Transport=='public'|carbonData$Transport=='walk/bicycle']<-'FuelEfficient'
#carbonData<- carbonData %>% mutate(Vehicle.Type=ifelse(Vehicle.Type=="","No vehicle",Vehicle.Type))
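A quick sanity check that the recode covered every blank (the factor summary further down confirms it: 6,721 FuelEfficient plus 3,279 private accounts for all 10,000 rows):

# verify no empty Vehicle.Type strings remain
stopifnot(all(carbonData$Vehicle.Type != ""))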
str(carbonData)
'data.frame':   10000 obs. of  20 variables:
 $ Body.Type                    : chr  "overweight" "obese" "overweight" "overweight" ...
 $ Sex                          : chr  "female" "female" "male" "male" ...
 $ Diet                         : chr  "pescatarian" "vegetarian" "omnivore" "omnivore" ...
 $ How.Often.Shower             : chr  "daily" "less frequently" "more frequently" "twice a day" ...
 $ Heating.Energy.Source        : chr  "coal" "natural gas" "wood" "wood" ...
 $ Transport                    : chr  "public" "walk/bicycle" "private" "walk/bicycle" ...
 $ Vehicle.Type                 : chr  "FuelEfficient" "FuelEfficient" "petrol" "FuelEfficient" ...
 $ Social.Activity              : chr  "often" "often" "never" "sometimes" ...
 $ Monthly.Grocery.Bill         : int  230 114 138 157 266 144 56 59 200 135 ...
 $ Frequency.of.Traveling.by.Air: chr  "frequently" "rarely" "never" "rarely" ...
 $ Vehicle.Monthly.Distance.Km  : int  210 9 2472 74 8457 658 5363 54 1376 440 ...
 $ Waste.Bag.Size               : chr  "large" "extra large" "small" "medium" ...
 $ Waste.Bag.Weekly.Count       : int  4 3 1 3 1 1 4 3 3 1 ...
 $ How.Long.TV.PC.Daily.Hour    : int  7 9 14 20 3 22 9 5 3 8 ...
 $ How.Many.New.Clothes.Monthly : int  26 38 47 5 5 18 11 39 31 23 ...
 $ How.Long.Internet.Daily.Hour : int  1 5 6 7 6 9 19 15 15 18 ...
 $ Energy.efficiency            : chr  "No" "No" "Sometimes" "Sometimes" ...
 $ Recycling                    : chr  "['Metal']" "['Metal']" "['Metal']" "['Paper', 'Plastic', 'Glass', 'Metal']" ...
 $ Cooking_With                 : chr  "['Stove', 'Oven']" "['Stove', 'Microwave']" "['Oven', 'Microwave']" "['Microwave', 'Grill', 'Airfryer']" ...
 $ CarbonEmission               : int  2238 1892 2595 1074 4743 1647 1832 2322 2494 1178 ...
#carbonData[carbonData == ""]<-NA
colSums(is.na(carbonData))
                    Body.Type                           Sex                          Diet              How.Often.Shower         Heating.Energy.Source 
                            0                             0                             0                             0                             0 
                    Transport                  Vehicle.Type               Social.Activity          Monthly.Grocery.Bill Frequency.of.Traveling.by.Air 
                            0                             0                             0                             0                             0 
  Vehicle.Monthly.Distance.Km                Waste.Bag.Size        Waste.Bag.Weekly.Count     How.Long.TV.PC.Daily.Hour  How.Many.New.Clothes.Monthly 
                            0                             0                             0                             0                             0 
 How.Long.Internet.Daily.Hour             Energy.efficiency                     Recycling                  Cooking_With                CarbonEmission 
                            0                             0                             0                             0                             0 
library(tidyverse)
── Attaching core tidyverse packages ────────────────────────────────────────────────────────────────────────────────────────────────────── tidyverse 2.0.0 ──
✔ dplyr     1.1.4     ✔ readr     2.1.5
✔ forcats   1.0.0     ✔ stringr   1.5.1
✔ ggplot2   3.5.0     ✔ tibble    3.2.1
✔ lubridate 1.9.3     ✔ tidyr     1.3.1
✔ purrr     1.0.2     
── Conflicts ────────────────────────────────────────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag()    masks stats::lag()
ℹ Use the conflicted package (http://conflicted.r-lib.org/) to force all conflicts to become errors
# strip the Python-style list syntax (e.g. "['Stove', 'Oven']") and split on ", "
parseList<-function(x){
  str_remove_all(x,"\\[|\\]|'")%>%
    strsplit(", ")%>%
    unlist()
}
carbonData$Recycling<-sapply(carbonData$Recycling,parseList)
carbonData$Cooking_With<-sapply(carbonData$Cooking_With,parseList)

# collapse each parsed vector back into a plain comma-separated string
carbonData$Recycling<-sapply(carbonData$Recycling,paste,collapse=",")
carbonData$Cooking_With<-sapply(carbonData$Cooking_With,paste,collapse=",")

#str(carbonData)

# expand a vector of comma-separated strings into 0/1 indicator columns
dummies<-function(col){
  # collect every distinct item across the whole column
  items<-unlist(str_split(col,","))
  items<-trimws(items)
  items<-items[items != ""]
  uniqueItems<-unique(items)

  # one row per observation, one column per unique item, initialised to 0
  dummyDataFrame<-data.frame(matrix(0,nrow = length(col),ncol = length(uniqueItems)))
  colnames(dummyDataFrame)<-uniqueItems

  for (i in seq_along(col)) {
    # items present in this row, trimmed, with empties dropped
    rowItems<-unlist(str_split(col[i],","))%>%
      map_chr(~str_trim(.))%>%
      discard(~.=="")
    rowItems<-rowItems[rowItems %in% uniqueItems]
    dummyDataFrame[i,rowItems]<-1
  }
  return(dummyDataFrame)
}

recyclingDummies<-dummies(carbonData$Recycling)
cookingDummies<-dummies(carbonData$Cooking_With)

carbonData<-cbind(carbonData,recyclingDummies,cookingDummies)

carbonData$Recycling<- NULL
carbonData$Cooking_With<-NULL
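For reference, tidyr can build the same indicator columns in a single pipeline. A sketch, assuming the comma-separated Recycling strings as they existed just before the column was dropped above (recyclingWide is a hypothetical name):

# sketch: one row per id/item pair, then pivot to 0/1 indicator columns
recyclingWide <- carbonData %>%
  mutate(id = row_number()) %>%
  separate_rows(Recycling, sep = ",") %>%
  mutate(flag = 1) %>%
  pivot_wider(id_cols = id, names_from = Recycling,
              values_from = flag, values_fill = 0)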

str(carbonData)
'data.frame':   10000 obs. of  27 variables:
 $ Body.Type                    : chr  "overweight" "obese" "overweight" "overweight" ...
 $ Sex                          : chr  "female" "female" "male" "male" ...
 $ Diet                         : chr  "pescatarian" "vegetarian" "omnivore" "omnivore" ...
 $ How.Often.Shower             : chr  "daily" "less frequently" "more frequently" "twice a day" ...
 $ Heating.Energy.Source        : chr  "coal" "natural gas" "wood" "wood" ...
 $ Transport                    : chr  "public" "walk/bicycle" "private" "walk/bicycle" ...
 $ Vehicle.Type                 : chr  "FuelEfficient" "FuelEfficient" "petrol" "FuelEfficient" ...
 $ Social.Activity              : chr  "often" "often" "never" "sometimes" ...
 $ Monthly.Grocery.Bill         : int  230 114 138 157 266 144 56 59 200 135 ...
 $ Frequency.of.Traveling.by.Air: chr  "frequently" "rarely" "never" "rarely" ...
 $ Vehicle.Monthly.Distance.Km  : int  210 9 2472 74 8457 658 5363 54 1376 440 ...
 $ Waste.Bag.Size               : chr  "large" "extra large" "small" "medium" ...
 $ Waste.Bag.Weekly.Count       : int  4 3 1 3 1 1 4 3 3 1 ...
 $ How.Long.TV.PC.Daily.Hour    : int  7 9 14 20 3 22 9 5 3 8 ...
 $ How.Many.New.Clothes.Monthly : int  26 38 47 5 5 18 11 39 31 23 ...
 $ How.Long.Internet.Daily.Hour : int  1 5 6 7 6 9 19 15 15 18 ...
 $ Energy.efficiency            : chr  "No" "No" "Sometimes" "Sometimes" ...
 $ CarbonEmission               : int  2238 1892 2595 1074 4743 1647 1832 2322 2494 1178 ...
 $ Metal                        : num  1 1 1 1 0 1 0 0 0 0 ...
 $ Paper                        : num  0 0 0 1 1 1 0 1 0 0 ...
 $ Plastic                      : num  0 0 0 1 0 0 0 1 0 0 ...
 $ Glass                        : num  0 0 0 1 0 1 0 1 1 1 ...
 $ Stove                        : num  1 1 0 0 0 1 0 1 0 0 ...
 $ Oven                         : num  1 0 1 0 1 1 0 0 0 0 ...
 $ Microwave                    : num  0 1 1 1 0 1 0 1 1 1 ...
 $ Grill                        : num  0 0 0 1 0 0 1 0 1 1 ...
 $ Airfryer                     : num  0 0 0 1 0 0 1 0 1 1 ...
carbonData<-carbonData %>%
  mutate_if(is.character, as.factor)%>%
  mutate_if(is.integer, as.numeric)
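mutate_if() still works but is superseded in current dplyr; the across() equivalent, for reference:

# same conversions with the across() idiom
carbonData <- carbonData %>%
  mutate(across(where(is.character), as.factor),
         across(where(is.integer), as.numeric))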

str(carbonData)
'data.frame':   10000 obs. of  27 variables:
 $ Body.Type                    : Factor w/ 4 levels "normal","obese",..: 3 2 3 3 2 3 4 4 3 4 ...
 $ Sex                          : Factor w/ 2 levels "female","male": 1 1 2 2 1 2 1 1 2 1 ...
 $ Diet                         : Factor w/ 4 levels "omnivore","pescatarian",..: 2 4 1 1 4 4 3 3 1 2 ...
 $ How.Often.Shower             : Factor w/ 4 levels "daily","less frequently",..: 1 2 3 4 1 2 2 3 1 1 ...
 $ Heating.Energy.Source        : Factor w/ 4 levels "coal","electricity",..: 1 3 4 4 1 4 4 1 4 4 ...
 $ Transport                    : Factor w/ 3 levels "private","public",..: 2 3 1 3 1 2 1 3 2 2 ...
 $ Vehicle.Type                 : Factor w/ 6 levels "diesel","electric",..: 3 3 6 3 1 3 4 3 3 3 ...
 $ Social.Activity              : Factor w/ 3 levels "never","often",..: 2 2 1 3 2 3 1 3 1 2 ...
 $ Monthly.Grocery.Bill         : num  230 114 138 157 266 144 56 59 200 135 ...
 $ Frequency.of.Traveling.by.Air: Factor w/ 4 levels "frequently","never",..: 1 3 2 3 4 1 3 4 1 3 ...
 $ Vehicle.Monthly.Distance.Km  : num  210 9 2472 74 8457 ...
 $ Waste.Bag.Size               : Factor w/ 4 levels "extra large",..: 2 1 4 3 2 2 3 1 3 1 ...
 $ Waste.Bag.Weekly.Count       : num  4 3 1 3 1 1 4 3 3 1 ...
 $ How.Long.TV.PC.Daily.Hour    : num  7 9 14 20 3 22 9 5 3 8 ...
 $ How.Many.New.Clothes.Monthly : num  26 38 47 5 5 18 11 39 31 23 ...
 $ How.Long.Internet.Daily.Hour : num  1 5 6 7 6 9 19 15 15 18 ...
 $ Energy.efficiency            : Factor w/ 3 levels "No","Sometimes",..: 1 1 2 2 3 2 2 1 3 2 ...
 $ CarbonEmission               : num  2238 1892 2595 1074 4743 ...
 $ Metal                        : num  1 1 1 1 0 1 0 0 0 0 ...
 $ Paper                        : num  0 0 0 1 1 1 0 1 0 0 ...
 $ Plastic                      : num  0 0 0 1 0 0 0 1 0 0 ...
 $ Glass                        : num  0 0 0 1 0 1 0 1 1 1 ...
 $ Stove                        : num  1 1 0 0 0 1 0 1 0 0 ...
 $ Oven                         : num  1 0 1 0 1 1 0 0 0 0 ...
 $ Microwave                    : num  0 1 1 1 0 1 0 1 1 1 ...
 $ Grill                        : num  0 0 0 1 0 0 1 0 1 1 ...
 $ Airfryer                     : num  0 0 0 1 0 0 1 0 1 1 ...
summary(carbonData)
       Body.Type        Sex                Diet             How.Often.Shower Heating.Energy.Source        Transport           Vehicle.Type   Social.Activity
 normal     :2473   female:5007   omnivore   :2492   daily          :2546    coal       :2523      private     :3279   diesel       : 622   never    :3406  
 obese      :2500   male  :4993   pescatarian:2554   less frequently:2487    electricity:2552      public      :3294   electric     : 671   often    :3319  
 overweight :2487                 vegan      :2497   more frequently:2451    natural gas:2462      walk/bicycle:3427   FuelEfficient:6721   sometimes:3275  
 underweight:2540                 vegetarian :2457   twice a day    :2516    wood       :2463                          hybrid       : 642                   
                                                                                                                       lpg          : 697                   
                                                                                                                       petrol       : 647                   
 Monthly.Grocery.Bill Frequency.of.Traveling.by.Air Vehicle.Monthly.Distance.Km     Waste.Bag.Size Waste.Bag.Weekly.Count How.Long.TV.PC.Daily.Hour
 Min.   : 50.0        frequently     :2524          Min.   :   0                extra large:2500   Min.   :1.000          Min.   : 0.00            
 1st Qu.:111.0        never          :2459          1st Qu.:  69                large      :2501   1st Qu.:2.000          1st Qu.: 6.00            
 Median :173.0        rarely         :2477          Median : 823                medium     :2474   Median :4.000          Median :12.00            
 Mean   :173.9        very frequently:2540          Mean   :2031                small      :2525   Mean   :4.025          Mean   :12.14            
 3rd Qu.:237.0                                      3rd Qu.:2517                                   3rd Qu.:6.000          3rd Qu.:18.00            
 Max.   :299.0                                      Max.   :9999                                   Max.   :7.000          Max.   :24.00            
 How.Many.New.Clothes.Monthly How.Long.Internet.Daily.Hour Energy.efficiency CarbonEmission     Metal            Paper           Plastic      
 Min.   : 0.00                Min.   : 0.00                No       :3221    Min.   : 306   Min.   :0.0000   Min.   :0.0000   Min.   :0.0000  
 1st Qu.:13.00                1st Qu.: 6.00                Sometimes:3463    1st Qu.:1538   1st Qu.:0.0000   1st Qu.:0.0000   1st Qu.:0.0000  
 Median :25.00                Median :12.00                Yes      :3316    Median :2080   Median :1.0000   Median :0.0000   Median :0.0000  
 Mean   :25.11                Mean   :11.89                                  Mean   :2269   Mean   :0.5047   Mean   :0.4977   Mean   :0.4997  
 3rd Qu.:38.00                3rd Qu.:18.00                                  3rd Qu.:2768   3rd Qu.:1.0000   3rd Qu.:1.0000   3rd Qu.:1.0000  
 Max.   :50.00                Max.   :24.00                                  Max.   :8377   Max.   :1.0000   Max.   :1.0000   Max.   :1.0000  
     Glass            Stove             Oven         Microwave          Grill           Airfryer     
 Min.   :0.0000   Min.   :0.0000   Min.   :0.000   Min.   :0.0000   Min.   :0.0000   Min.   :0.0000  
 1st Qu.:0.0000   1st Qu.:0.0000   1st Qu.:0.000   1st Qu.:0.0000   1st Qu.:0.0000   1st Qu.:0.0000  
 Median :0.0000   Median :1.0000   Median :1.000   Median :1.0000   Median :0.0000   Median :0.0000  
 Mean   :0.4979   Mean   :0.5041   Mean   :0.505   Mean   :0.5073   Mean   :0.4992   Mean   :0.4992  
 3rd Qu.:1.0000   3rd Qu.:1.0000   3rd Qu.:1.000   3rd Qu.:1.0000   3rd Qu.:1.0000   3rd Qu.:1.0000  
 Max.   :1.0000   Max.   :1.0000   Max.   :1.000   Max.   :1.0000   Max.   :1.0000   Max.   :1.0000  
table(carbonData$Body.Type)

     normal       obese  overweight underweight 
       2473        2500        2487        2540 
table(carbonData$Sex)

female   male 
  5007   4993 
table(carbonData$Diet)

   omnivore pescatarian       vegan  vegetarian 
       2492        2554        2497        2457 
table(carbonData$How.Often.Shower)

          daily less frequently more frequently     twice a day 
           2546            2487            2451            2516 
table(carbonData$Heating.Energy.Source)

       coal electricity natural gas        wood 
       2523        2552        2462        2463 
table(carbonData$Transport)

     private       public walk/bicycle 
        3279         3294         3427 
table(carbonData$Social.Activity)

    never     often sometimes 
     3406      3319      3275 
table(carbonData$Frequency.of.Traveling.by.Air)

     frequently           never          rarely very frequently 
           2524            2459            2477            2540 
table(carbonData$Waste.Bag.Size)

extra large       large      medium       small 
       2500        2501        2474        2525 
table(carbonData$Energy.efficiency)

       No Sometimes       Yes 
     3221      3463      3316 
hist(carbonData$CarbonEmission)

# the raw target is right-skewed (median 2080 vs max 8377), so model log(CarbonEmission)
carbonData$CarbonEmission<-log(carbonData$CarbonEmission)
hist(carbonData$CarbonEmission)

carbonIndices<-which(names(carbonData)=='CarbonEmission')
for (c in colnames(carbonData[,-carbonIndices])) {
  if(is.factor(carbonData[,c])){
    try({
        anovaResult<-aov(carbonData$CarbonEmission~carbonData[,c])
        cat("ANOVA of ",c, "and CarbonEmission", "\n")
        print(summary(anovaResult))
        # the formula puts the factor on the x-axis and log emission on the y-axis
        boxplot(carbonData$CarbonEmission~carbonData[,c], main = paste("Carbon Emission vs", c), xlab = c, ylab = "CarbonEmission", col="lightgreen")
      })
  }
  else if (is.numeric(carbonData[,c])){
    try({
      corTest<-cor.test(carbonData$CarbonEmission,carbonData[,c], method = "pearson")
      cat("p.value of ",c, "and Carbon Emission", corTest$p.value, "\n")
      # predictor on the x-axis, log emission on the y-axis
      plot(carbonData[,c],carbonData$CarbonEmission, main = paste("Carbon Emission vs", c), xlab = c, ylab = "Carbon Emission")
    })
  }
}
ANOVA of  Body.Type and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    3   82.7  27.583     149 <2e-16 ***
Residuals       9996 1850.3   0.185                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
ANOVA of  Sex and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    1   55.5   55.51   295.6 <2e-16 ***
Residuals       9998 1877.5    0.19                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

ANOVA of  Diet and CarbonEmission 
                  Df Sum Sq Mean Sq F value   Pr(>F)    
carbonData[, c]    3   11.8   3.944   20.52 2.98e-13 ***
Residuals       9996 1921.2   0.192                     
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

ANOVA of  How.Often.Shower and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)  
carbonData[, c]    3    1.9  0.6333   3.278 0.0201 *
Residuals       9996 1931.1  0.1932                 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

ANOVA of  Heating.Energy.Source and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    3   71.1  23.702   127.2 <2e-16 ***
Residuals       9996 1861.9   0.186                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

ANOVA of  Transport and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    2  384.7  192.34    1242 <2e-16 ***
Residuals       9997 1548.3    0.15                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

ANOVA of  Vehicle.Type and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    5  573.6  114.71   843.3 <2e-16 ***
Residuals       9994 1359.5    0.14                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

ANOVA of  Social.Activity and CarbonEmission 
                  Df Sum Sq Mean Sq F value   Pr(>F)    
carbonData[, c]    2    8.5   4.248   22.07 2.74e-10 ***
Residuals       9997 1924.5   0.193                     
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

p.value of  Monthly.Grocery.Bill and Carbon Emission 8.380793e-21 

ANOVA of  Frequency.of.Traveling.by.Air and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    3  568.2  189.41    1387 <2e-16 ***
Residuals       9996 1364.8    0.14                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

p.value of  Vehicle.Monthly.Distance.Km and Carbon Emission 0 

ANOVA of  Waste.Bag.Size and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)    
carbonData[, c]    3   53.1  17.692   94.07 <2e-16 ***
Residuals       9996 1879.9   0.188                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

p.value of  Waste.Bag.Weekly.Count and Carbon Emission 3.257926e-79 

p.value of  How.Long.TV.PC.Daily.Hour and Carbon Emission 0.2282313 

p.value of  How.Many.New.Clothes.Monthly and Carbon Emission 8.851718e-131 

p.value of  How.Long.Internet.Daily.Hour and Carbon Emission 1.411988e-09 

ANOVA of  Energy.efficiency and CarbonEmission 
                  Df Sum Sq Mean Sq F value Pr(>F)
carbonData[, c]    2    0.7  0.3284   1.699  0.183
Residuals       9997 1932.4  0.1933               

p.value of  Metal and Carbon Emission 9.628913e-17 

p.value of  Paper and Carbon Emission 2.622474e-20 

p.value of  Plastic and Carbon Emission 3.357259e-07 

p.value of  Glass and Carbon Emission 3.371649e-08 

p.value of  Stove and Carbon Emission 0.3583156 

p.value of  Oven and Carbon Emission 0.001109564 

p.value of  Microwave and Carbon Emission 0.7301457 

p.value of  Grill and Carbon Emission 0.06209031 

p.value of  Airfryer and Carbon Emission 0.06209031 
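With roughly 26 separate tests in the loop, the borderline raw p-values (Grill and Airfryer at 0.062) are worth adjusting for multiplicity. A small sketch applying the Benjamini-Hochberg correction to the appliance p-values reported above:

# BH-adjust the cooking-appliance p-values from the loop output
pvals <- c(Stove = 0.3583156, Oven = 0.001109564, Microwave = 0.7301457,
           Grill = 0.06209031, Airfryer = 0.06209031)
p.adjust(pvals, method = "BH")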

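The modelling calls below assume caret is loaded and the data has been split into carbonTrainData / carbonTestData (8,001 training rows suggests roughly an 80/20 split). Since the split itself is not shown above, here is a minimal sketch of one way to produce it:

# assumed setup: caret plus an ~80/20 train/test partition
library(caret)
set.seed(1)
trainIdx <- createDataPartition(carbonData$CarbonEmission, p = 0.8, list = FALSE)
carbonTrainData <- carbonData[trainIdx, ]
carbonTestData  <- carbonData[-trainIdx, ]
carbonTestLabels <- carbonTestData$CarbonEmission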
knnModel<-train(CarbonEmission~.,data = carbonTrainData, method="knn", trControl=trainControl(method = "cv", number=5))
knnModel
k-Nearest Neighbors 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6400, 6401, 6401 
Resampling results across tuning parameters:

  k  RMSE       Rsquared   MAE      
  5  0.3912082  0.2416923  0.3099291
  7  0.3820210  0.2612955  0.3027079
  9  0.3789765  0.2674213  0.3010474

RMSE was used to select the optimal model using the smallest value.
The final value used for the model was k = 9.
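kNN is distance-based, so an unscaled predictor like Vehicle.Monthly.Distance.Km (0-9999) will dominate the hour-scale variables. A variant worth trying standardizes the predictors inside the resampling loop:

# sketch: the same kNN fit with centering and scaling applied via caret
knnModelScaled <- train(CarbonEmission ~ ., data = carbonTrainData, method = "knn",
                        preProcess = c("center", "scale"),
                        trControl = trainControl(method = "cv", number = 5))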
knnPred<-predict(knnModel,newdata = carbonTestData)

# root-mean-square error: average the squared residuals, then take the square
# root; mean(x - y)^2 would instead give only the squared mean bias and
# grossly understate the error
rmse=function(x,y){
  return(sqrt(mean((x-y)^2)))
}
rmse(knnPred,carbonTestLabels)
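Because the target was log-transformed, this RMSE is in log units; to express the error in the original emission units, back-transform before scoring:

# RMSE on the original (unlogged) emission scale
rmse(exp(knnPred), exp(carbonTestLabels))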
lmModel<-train(CarbonEmission~.,data = carbonTrainData, method="lm", trControl=trainControl(method = "cv", number=5))
lmModel
Linear Regression 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results:

  RMSE       Rsquared   MAE       
  0.1216956  0.9235284  0.08674322

Tuning parameter 'intercept' was held constant at a value of TRUE
summary(lmModel)

Call:
lm(formula = .outcome ~ ., data = dat)

Residuals:
     Min       1Q   Median       3Q      Max 
-0.77905 -0.05477  0.00826  0.06510  0.47201 

Coefficients: (2 not defined because of singularities)
                                                 Estimate Std. Error t value Pr(>|t|)    
(Intercept)                                     7.409e+00  1.167e-02 634.830  < 2e-16 ***
Body.Typeobese                                  1.843e-01  3.854e-03  47.825  < 2e-16 ***
Body.Typeoverweight                             9.380e-02  3.854e-03  24.337  < 2e-16 ***
Body.Typeunderweight                           -4.979e-02  3.841e-03 -12.964  < 2e-16 ***
Sexmale                                         1.511e-01  2.721e-03  55.551  < 2e-16 ***
Dietpescatarian                                -4.430e-02  3.843e-03 -11.529  < 2e-16 ***
Dietvegan                                      -7.990e-02  3.860e-03 -20.701  < 2e-16 ***
Dietvegetarian                                 -7.180e-02  3.871e-03 -18.548  < 2e-16 ***
`How.Often.Showerless frequently`              -7.940e-03  3.827e-03  -2.075  0.03803 *  
`How.Often.Showermore frequently`               1.786e-02  3.850e-03   4.638 3.57e-06 ***
`How.Often.Showertwice a day`                   1.058e-02  3.838e-03   2.755  0.00588 ** 
Heating.Energy.Sourceelectricity               -2.236e-01  3.809e-03 -58.694  < 2e-16 ***
`Heating.Energy.Sourcenatural gas`             -9.702e-02  3.843e-03 -25.244  < 2e-16 ***
Heating.Energy.Sourcewood                      -9.863e-02  3.866e-03 -25.513  < 2e-16 ***
Transportpublic                                -1.970e-01  6.834e-03 -28.834  < 2e-16 ***
`Transportwalk/bicycle`                        -1.722e-01  7.231e-03 -23.809  < 2e-16 ***
Vehicle.Typeelectric                           -5.033e-01  7.590e-03 -66.310  < 2e-16 ***
Vehicle.TypeFuelEfficient                              NA         NA      NA       NA    
Vehicle.Typehybrid                             -1.377e-01  7.638e-03 -18.032  < 2e-16 ***
Vehicle.Typelpg                                 3.441e-02  7.609e-03   4.523 6.19e-06 ***
Vehicle.Typepetrol                              1.909e-01  7.639e-03  24.994  < 2e-16 ***
Social.Activityoften                            8.625e-02  3.329e-03  25.912  < 2e-16 ***
Social.Activitysometimes                        3.894e-02  3.326e-03  11.707  < 2e-16 ***
Monthly.Grocery.Bill                            4.704e-04  1.873e-05  25.120  < 2e-16 ***
Frequency.of.Traveling.by.Airnever             -3.610e-01  3.865e-03 -93.387  < 2e-16 ***
Frequency.of.Traveling.by.Airrarely            -2.413e-01  3.838e-03 -62.885  < 2e-16 ***
`Frequency.of.Traveling.by.Airvery frequently`  2.643e-01  3.815e-03  69.276  < 2e-16 ***
Vehicle.Monthly.Distance.Km                     6.730e-05  8.023e-07  83.884  < 2e-16 ***
Waste.Bag.Sizelarge                            -6.017e-02  3.833e-03 -15.698  < 2e-16 ***
Waste.Bag.Sizemedium                           -1.264e-01  3.841e-03 -32.917  < 2e-16 ***
Waste.Bag.Sizesmall                            -1.948e-01  3.849e-03 -50.609  < 2e-16 ***
Waste.Bag.Weekly.Count                          4.161e-02  6.827e-04  60.946  < 2e-16 ***
How.Long.TV.PC.Daily.Hour                       1.272e-03  1.909e-04   6.662 2.89e-11 ***
How.Many.New.Clothes.Monthly                    7.067e-03  9.244e-05  76.452  < 2e-16 ***
How.Long.Internet.Daily.Hour                    3.958e-03  1.869e-04  21.177  < 2e-16 ***
Energy.efficiencySometimes                     -2.072e-02  3.322e-03  -6.236 4.73e-10 ***
Energy.efficiencyYes                           -3.183e-02  3.370e-03  -9.444  < 2e-16 ***
Metal                                          -6.951e-02  2.722e-03 -25.540  < 2e-16 ***
Paper                                          -7.439e-02  2.720e-03 -27.347  < 2e-16 ***
Plastic                                        -2.879e-02  2.722e-03 -10.575  < 2e-16 ***
Glass                                          -4.867e-02  2.718e-03 -17.907  < 2e-16 ***
Stove                                           1.493e-02  2.719e-03   5.492 4.10e-08 ***
Oven                                            1.834e-02  2.721e-03   6.743 1.67e-11 ***
Microwave                                       7.785e-03  2.719e-03   2.863  0.00420 ** 
Grill                                           1.791e-02  2.719e-03   6.589 4.72e-11 ***
Airfryer                                               NA         NA      NA       NA    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.1213 on 7957 degrees of freedom
Multiple R-squared:  0.9243,    Adjusted R-squared:  0.9239 
F-statistic:  2260 on 43 and 7957 DF,  p-value: < 2.2e-16
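The cross-validated RMSE of 0.12 can be checked against the held-out test set using the same rmse helper; a quick sketch:

# held-out evaluation of the linear model
lmPred <- predict(lmModel, newdata = carbonTestData)
rmse(lmPred, carbonTestLabels)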
stepwiseModel<-train(CarbonEmission~.,data = carbonTrainData, method="leapBackward", trControl=trainControl(method = "cv", number=5))
Warning: 2  linear dependencies found
Reordering variables and trying again:

(leaps repeats this warning on each retry; the two dependencies are the aliased columns already visible in the lm coefficients above: Vehicle.TypeFuelEfficient, which is fully determined by Transport, and Airfryer, which is collinear with Grill here — note their identical correlation p-values)
stepwiseModel
Linear Regression with Backwards Selection 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6400, 6401, 6401 
Resampling results across tuning parameters:

  nvmax  RMSE       Rsquared   MAE      
  2      0.3760164  0.2691345  0.2992884
  3      0.3594632  0.3318216  0.2867232
  4      0.3460790  0.3807424  0.2727536

RMSE was used to select the optimal model using the smallest value.
The final value used for the model was nvmax = 4.
summary(stepwiseModel$finalModel)
Subset selection object
45 Variables  (and intercept)
                                             Forced in Forced out
Body.Typeobese                                   FALSE      FALSE
Body.Typeoverweight                              FALSE      FALSE
Body.Typeunderweight                             FALSE      FALSE
Sexmale                                          FALSE      FALSE
Dietpescatarian                                  FALSE      FALSE
Dietvegan                                        FALSE      FALSE
Dietvegetarian                                   FALSE      FALSE
How.Often.Showerless frequently                  FALSE      FALSE
How.Often.Showermore frequently                  FALSE      FALSE
How.Often.Showertwice a day                      FALSE      FALSE
Heating.Energy.Sourceelectricity                 FALSE      FALSE
Heating.Energy.Sourcenatural gas                 FALSE      FALSE
Heating.Energy.Sourcewood                        FALSE      FALSE
Transportpublic                                  FALSE      FALSE
Transportwalk/bicycle                            FALSE      FALSE
Vehicle.Typeelectric                             FALSE      FALSE
Vehicle.Typehybrid                               FALSE      FALSE
Vehicle.Typelpg                                  FALSE      FALSE
Vehicle.Typepetrol                               FALSE      FALSE
Social.Activityoften                             FALSE      FALSE
Social.Activitysometimes                         FALSE      FALSE
Monthly.Grocery.Bill                             FALSE      FALSE
Frequency.of.Traveling.by.Airnever               FALSE      FALSE
Frequency.of.Traveling.by.Airrarely              FALSE      FALSE
Frequency.of.Traveling.by.Airvery frequently     FALSE      FALSE
Vehicle.Monthly.Distance.Km                      FALSE      FALSE
Waste.Bag.Sizelarge                              FALSE      FALSE
Waste.Bag.Sizemedium                             FALSE      FALSE
Waste.Bag.Sizesmall                              FALSE      FALSE
Waste.Bag.Weekly.Count                           FALSE      FALSE
How.Long.TV.PC.Daily.Hour                        FALSE      FALSE
How.Many.New.Clothes.Monthly                     FALSE      FALSE
How.Long.Internet.Daily.Hour                     FALSE      FALSE
Energy.efficiencySometimes                       FALSE      FALSE
Energy.efficiencyYes                             FALSE      FALSE
Metal                                            FALSE      FALSE
Paper                                            FALSE      FALSE
Plastic                                          FALSE      FALSE
Glass                                            FALSE      FALSE
Stove                                            FALSE      FALSE
Oven                                             FALSE      FALSE
Microwave                                        FALSE      FALSE
Grill                                            FALSE      FALSE
Vehicle.TypeFuelEfficient                        FALSE      FALSE
Airfryer                                         FALSE      FALSE
1 subsets of each size up to 5
Selection Algorithm: backward
         Body.Typeobese Body.Typeoverweight Body.Typeunderweight Sexmale Dietpescatarian Dietvegan Dietvegetarian How.Often.Showerless frequently
1  ( 1 ) " "            " "                 " "                  " "     " "             " "       " "            " "                            
2  ( 1 ) " "            " "                 " "                  " "     " "             " "       " "            " "                            
3  ( 1 ) " "            " "                 " "                  " "     " "             " "       " "            " "                            
4  ( 1 ) " "            " "                 " "                  " "     " "             " "       " "            " "                            
5  ( 1 ) " "            " "                 " "                  " "     " "             " "       " "            " "                            
         How.Often.Showermore frequently How.Often.Showertwice a day Heating.Energy.Sourceelectricity Heating.Energy.Sourcenatural gas
1  ( 1 ) " "                             " "                         " "                              " "                             
2  ( 1 ) " "                             " "                         " "                              " "                             
3  ( 1 ) " "                             " "                         " "                              " "                             
4  ( 1 ) " "                             " "                         " "                              " "                             
5  ( 1 ) " "                             " "                         " "                              " "                             
         Heating.Energy.Sourcewood Transportpublic Transportwalk/bicycle Vehicle.Typeelectric Vehicle.TypeFuelEfficient Vehicle.Typehybrid Vehicle.Typelpg
1  ( 1 ) " "                       " "             " "                   " "                  " "                       " "                " "            
2  ( 1 ) " "                       " "             " "                   " "                  " "                       " "                " "            
3  ( 1 ) " "                       " "             " "                   "*"                  " "                       " "                " "            
4  ( 1 ) " "                       " "             " "                   "*"                  " "                       " "                " "            
5  ( 1 ) " "                       " "             " "                   "*"                  " "                       " "                " "            
         Vehicle.Typepetrol Social.Activityoften Social.Activitysometimes Monthly.Grocery.Bill Frequency.of.Traveling.by.Airnever
1  ( 1 ) " "                " "                  " "                      " "                  " "                               
2  ( 1 ) " "                " "                  " "                      " "                  " "                               
3  ( 1 ) " "                " "                  " "                      " "                  " "                               
4  ( 1 ) " "                " "                  " "                      " "                  " "                               
5  ( 1 ) " "                " "                  " "                      " "                  "*"                               
         Frequency.of.Traveling.by.Airrarely Frequency.of.Traveling.by.Airvery frequently Vehicle.Monthly.Distance.Km Waste.Bag.Sizelarge
1  ( 1 ) " "                                 " "                                          "*"                         " "                
2  ( 1 ) " "                                 "*"                                          "*"                         " "                
3  ( 1 ) " "                                 "*"                                          "*"                         " "                
4  ( 1 ) " "                                 "*"                                          "*"                         " "                
5  ( 1 ) " "                                 "*"                                          "*"                         " "                
         Waste.Bag.Sizemedium Waste.Bag.Sizesmall Waste.Bag.Weekly.Count How.Long.TV.PC.Daily.Hour How.Many.New.Clothes.Monthly How.Long.Internet.Daily.Hour
1  ( 1 ) " "                  " "                 " "                    " "                       " "                          " "                         
2  ( 1 ) " "                  " "                 " "                    " "                       " "                          " "                         
3  ( 1 ) " "                  " "                 " "                    " "                       " "                          " "                         
4  ( 1 ) " "                  " "                 " "                    " "                       "*"                          " "                         
5  ( 1 ) " "                  " "                 " "                    " "                       "*"                          " "                         
         Energy.efficiencySometimes Energy.efficiencyYes Metal Paper Plastic Glass Stove Oven Microwave Grill Airfryer
1  ( 1 ) " "                        " "                  " "   " "   " "     " "   " "   " "  " "       " "   " "     
2  ( 1 ) " "                        " "                  " "   " "   " "     " "   " "   " "  " "       " "   " "     
3  ( 1 ) " "                        " "                  " "   " "   " "     " "   " "   " "  " "       " "   " "     
4  ( 1 ) " "                        " "                  " "   " "   " "     " "   " "   " "  " "       " "   " "     
5  ( 1 ) " "                        " "                  " "   " "   " "     " "   " "   " "  " "       " "   " "     
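The selection table above is hard to scan; coef() on the regsubsets object returns the coefficients of the chosen subset directly, with id giving the subset size:

# coefficients of the best 4-predictor subset picked by CV
coef(stepwiseModel$finalModel, id = 4)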
colSums(is.na(carbonTrainData))
                    Body.Type                           Sex                          Diet              How.Often.Shower         Heating.Energy.Source 
                            0                             0                             0                             0                             0 
                    Transport                  Vehicle.Type               Social.Activity          Monthly.Grocery.Bill Frequency.of.Traveling.by.Air 
                            0                             0                             0                             0                             0 
  Vehicle.Monthly.Distance.Km                Waste.Bag.Size        Waste.Bag.Weekly.Count     How.Long.TV.PC.Daily.Hour  How.Many.New.Clothes.Monthly 
                            0                             0                             0                             0                             0 
 How.Long.Internet.Daily.Hour             Energy.efficiency                CarbonEmission                         Metal                         Paper 
                            0                             0                             0                             0                             0 
                      Plastic                         Glass                         Stove                          Oven                     Microwave 
                            0                             0                             0                             0                             0 
                        Grill                      Airfryer 
                            0                             0 

#Lasso Model

library(glmnet)
Loading required package: Matrix

Attaching package: ‘Matrix’

The following objects are masked from ‘package:tidyr’:

    expand, pack, unpack

Loaded glmnet 4.1-8
set.seed(1)
lassoModel<-train(CarbonEmission~.,data = carbonTrainData,method="glmnet",trControl= trainControl(method = "cv", number=5), tuneGrid = expand.grid(alpha=1, lambda=10^seq(-3,3,length=100))) 
Warning: There were missing values in resampled performance measures.
lassoModel
glmnet 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results across tuning parameters:

  lambda        RMSE       Rsquared   MAE       
  1.000000e-03  0.1219858  0.9232209  0.08693488
  1.149757e-03  0.1220753  0.9231430  0.08700875
  1.321941e-03  0.1221904  0.9230430  0.08710667
  1.519911e-03  0.1223470  0.9229054  0.08723849
  1.747528e-03  0.1225536  0.9227225  0.08741574
  2.009233e-03  0.1228246  0.9224806  0.08765246
  2.310130e-03  0.1231771  0.9221635  0.08796258
  2.656088e-03  0.1236457  0.9217360  0.08837683
  3.053856e-03  0.1242478  0.9211821  0.08890477
  3.511192e-03  0.1250325  0.9204489  0.08960023
  4.037017e-03  0.1260563  0.9194736  0.09051846
  4.641589e-03  0.1273779  0.9181895  0.09171832
  5.336699e-03  0.1290497  0.9165381  0.09323783
  6.135907e-03  0.1311917  0.9143534  0.09516533
  7.054802e-03  0.1337763  0.9116869  0.09744389
  8.111308e-03  0.1369090  0.9083708  0.10017575
  9.326033e-03  0.1403485  0.9047807  0.10315995
  1.072267e-02  0.1442686  0.9006956  0.10650907
  1.232847e-02  0.1491610  0.8953001  0.11072814
  1.417474e-02  0.1546006  0.8892623  0.11535445
  1.629751e-02  0.1598460  0.8840559  0.11952361
  1.873817e-02  0.1659921  0.8778439  0.12436408
  2.154435e-02  0.1736061  0.8694320  0.13042497
  2.477076e-02  0.1830635  0.8576678  0.13811167
  2.848036e-02  0.1936998  0.8436314  0.14679304
  3.274549e-02  0.2049344  0.8287289  0.15594333
  3.764936e-02  0.2172973  0.8115630  0.16606920
  4.328761e-02  0.2310128  0.7912730  0.17738897
  4.977024e-02  0.2477212  0.7609757  0.19110356
  5.722368e-02  0.2674957  0.7164689  0.20734886
  6.579332e-02  0.2889289  0.6589664  0.22517705
  7.564633e-02  0.3086367  0.6018407  0.24138756
  8.697490e-02  0.3233407  0.5674578  0.25346893
  1.000000e-01  0.3380457  0.5345973  0.26560896
  1.149757e-01  0.3519869  0.5106866  0.27729282
  1.321941e-01  0.3667388  0.4921571  0.28996428
  1.519911e-01  0.3835151  0.4705402  0.30453448
  1.747528e-01  0.4025545  0.4493947  0.32115299
  2.009233e-01  0.4247240  0.3409155  0.34036123
  2.310130e-01  0.4397931        NaN  0.35168920
  2.656088e-01  0.4397931        NaN  0.35168920
  3.053856e-01  0.4397931        NaN  0.35168920
  3.511192e-01  0.4397931        NaN  0.35168920
  4.037017e-01  0.4397931        NaN  0.35168920
  4.641589e-01  0.4397931        NaN  0.35168920
  5.336699e-01  0.4397931        NaN  0.35168920
  6.135907e-01  0.4397931        NaN  0.35168920
  7.054802e-01  0.4397931        NaN  0.35168920
  8.111308e-01  0.4397931        NaN  0.35168920
  9.326033e-01  0.4397931        NaN  0.35168920
  1.072267e+00  0.4397931        NaN  0.35168920
  1.232847e+00  0.4397931        NaN  0.35168920
  1.417474e+00  0.4397931        NaN  0.35168920
  1.629751e+00  0.4397931        NaN  0.35168920
  1.873817e+00  0.4397931        NaN  0.35168920
  2.154435e+00  0.4397931        NaN  0.35168920
  2.477076e+00  0.4397931        NaN  0.35168920
  2.848036e+00  0.4397931        NaN  0.35168920
  3.274549e+00  0.4397931        NaN  0.35168920
  3.764936e+00  0.4397931        NaN  0.35168920
  4.328761e+00  0.4397931        NaN  0.35168920
  4.977024e+00  0.4397931        NaN  0.35168920
  5.722368e+00  0.4397931        NaN  0.35168920
  6.579332e+00  0.4397931        NaN  0.35168920
  7.564633e+00  0.4397931        NaN  0.35168920
  8.697490e+00  0.4397931        NaN  0.35168920
  1.000000e+01  0.4397931        NaN  0.35168920
  1.149757e+01  0.4397931        NaN  0.35168920
  1.321941e+01  0.4397931        NaN  0.35168920
  1.519911e+01  0.4397931        NaN  0.35168920
  1.747528e+01  0.4397931        NaN  0.35168920
  2.009233e+01  0.4397931        NaN  0.35168920
  2.310130e+01  0.4397931        NaN  0.35168920
  2.656088e+01  0.4397931        NaN  0.35168920
  3.053856e+01  0.4397931        NaN  0.35168920
  3.511192e+01  0.4397931        NaN  0.35168920
  4.037017e+01  0.4397931        NaN  0.35168920
  4.641589e+01  0.4397931        NaN  0.35168920
  5.336699e+01  0.4397931        NaN  0.35168920
  6.135907e+01  0.4397931        NaN  0.35168920
  7.054802e+01  0.4397931        NaN  0.35168920
  8.111308e+01  0.4397931        NaN  0.35168920
  9.326033e+01  0.4397931        NaN  0.35168920
  1.072267e+02  0.4397931        NaN  0.35168920
  1.232847e+02  0.4397931        NaN  0.35168920
  1.417474e+02  0.4397931        NaN  0.35168920
  1.629751e+02  0.4397931        NaN  0.35168920
  1.873817e+02  0.4397931        NaN  0.35168920
  2.154435e+02  0.4397931        NaN  0.35168920
  2.477076e+02  0.4397931        NaN  0.35168920
  2.848036e+02  0.4397931        NaN  0.35168920
  3.274549e+02  0.4397931        NaN  0.35168920
  3.764936e+02  0.4397931        NaN  0.35168920
  4.328761e+02  0.4397931        NaN  0.35168920
  4.977024e+02  0.4397931        NaN  0.35168920
  5.722368e+02  0.4397931        NaN  0.35168920
  6.579332e+02  0.4397931        NaN  0.35168920
  7.564633e+02  0.4397931        NaN  0.35168920
  8.697490e+02  0.4397931        NaN  0.35168920
  1.000000e+03  0.4397931        NaN  0.35168920

Tuning parameter 'alpha' was held constant at a value of 1
RMSE was used to select the optimal model using the smallest value.
The final values used for the model were alpha = 1 and lambda = 0.001.
lassoLambda<-lassoModel$bestTune$lambda
lassoPredictor<- setdiff(names(carbonTrainData),"CarbonEmission")
# caution: as.matrix() on a data frame with factor columns coerces everything to
# character, so the factor predictors never reach glmnet as usable numbers (hence
# the warning and the "." rows below); see the model.matrix() sketch after the plot
lassoFinalModel<-glmnet(as.matrix(carbonTrainData[,lassoPredictor]),carbonTrainData[,"CarbonEmission"],alpha = 1,lambda = lassoLambda, family = "gaussian")
Warning: NAs introduced by coercion
coeff<-coef(lassoFinalModel)
coeff
27 x 1 sparse Matrix of class "dgCMatrix"
                                         s0
(Intercept)                    7.064707e+00
Body.Type                      .           
Sex                            .           
Diet                           .           
How.Often.Shower               .           
Heating.Energy.Source          .           
Transport                      .           
Vehicle.Type                   .           
Social.Activity                .           
Monthly.Grocery.Bill           5.264763e-04
Frequency.of.Traveling.by.Air  .           
Vehicle.Monthly.Distance.Km    8.078680e-05
Waste.Bag.Size                 .           
Waste.Bag.Weekly.Count         4.161160e-02
How.Long.TV.PC.Daily.Hour      1.146001e-03
How.Many.New.Clothes.Monthly   6.927597e-03
How.Long.Internet.Daily.Hour   3.612047e-03
Energy.efficiency              .           
Metal                         -6.556145e-02
Paper                         -7.009317e-02
Plastic                       -4.427840e-02
Glass                         -3.604447e-02
Stove                          8.269632e-03
Oven                           2.230270e-02
Microwave                      3.326151e-04
Grill                          1.333591e-02
Airfryer                       3.105221e-15
zeroCoeff<-coeff==0
zeroCoeff
27 x 1 Matrix of class "lgeMatrix"
                                 s0
(Intercept)                   FALSE
Body.Type                      TRUE
Sex                            TRUE
Diet                           TRUE
How.Often.Shower               TRUE
Heating.Energy.Source          TRUE
Transport                      TRUE
Vehicle.Type                   TRUE
Social.Activity                TRUE
Monthly.Grocery.Bill          FALSE
Frequency.of.Traveling.by.Air  TRUE
Vehicle.Monthly.Distance.Km   FALSE
Waste.Bag.Size                 TRUE
Waste.Bag.Weekly.Count        FALSE
How.Long.TV.PC.Daily.Hour     FALSE
How.Many.New.Clothes.Monthly  FALSE
How.Long.Internet.Daily.Hour  FALSE
Energy.efficiency              TRUE
Metal                         FALSE
Paper                         FALSE
Plastic                       FALSE
Glass                         FALSE
Stove                         FALSE
Oven                          FALSE
Microwave                     FALSE
Grill                         FALSE
Airfryer                      FALSE
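The "." rows above are an artifact of the as.matrix() coercion, not of the lasso: the factor columns were lost before fitting. A sketch of the refit with the factors one-hot encoded via model.matrix() (the same applies to the ridge refit below); xTrain, yTrain, and lassoRefit are hypothetical names:

# encode factors explicitly, then refit at the CV-chosen lambda
xTrain <- model.matrix(CarbonEmission ~ ., data = carbonTrainData)[, -1]  # drop intercept column
yTrain <- carbonTrainData$CarbonEmission
lassoRefit <- glmnet(xTrain, yTrain, alpha = 1, lambda = lassoLambda, family = "gaussian")
coef(lassoRefit)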
plot(lassoModel)

#Ridge Model

set.seed(1)
ridgeModel<-train(CarbonEmission~.,data = carbonTrainData,method="glmnet",trControl= trainControl(method = "cv", number=5), tuneGrid = expand.grid(alpha=0, lambda=10^seq(-3,3,length=100))) 
Warning: There were missing values in resampled performance measures.
ridgeModel
glmnet 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results across tuning parameters:

  lambda        RMSE       Rsquared   MAE       
  1.000000e-03  0.1241596  0.9222100  0.08865090
  1.149757e-03  0.1241596  0.9222100  0.08865090
  1.321941e-03  0.1241596  0.9222100  0.08865090
  1.519911e-03  0.1241596  0.9222100  0.08865090
  1.747528e-03  0.1241596  0.9222100  0.08865090
  2.009233e-03  0.1241596  0.9222100  0.08865090
  2.310130e-03  0.1241596  0.9222100  0.08865090
  2.656088e-03  0.1241596  0.9222100  0.08865090
  3.053856e-03  0.1241596  0.9222100  0.08865090
  3.511192e-03  0.1241596  0.9222100  0.08865090
  4.037017e-03  0.1241596  0.9222100  0.08865090
  4.641589e-03  0.1241596  0.9222100  0.08865090
  5.336699e-03  0.1241596  0.9222100  0.08865090
  6.135907e-03  0.1241596  0.9222100  0.08865090
  7.054802e-03  0.1241596  0.9222100  0.08865090
  8.111308e-03  0.1241596  0.9222100  0.08865090
  9.326033e-03  0.1241596  0.9222100  0.08865090
  1.072267e-02  0.1241596  0.9222100  0.08865090
  1.232847e-02  0.1241596  0.9222100  0.08865090
  1.417474e-02  0.1241596  0.9222100  0.08865090
  1.629751e-02  0.1241596  0.9222100  0.08865090
  1.873817e-02  0.1241596  0.9222100  0.08865090
  2.154435e-02  0.1241596  0.9222100  0.08865090
  2.477076e-02  0.1245591  0.9220121  0.08897062
  2.848036e-02  0.1253433  0.9216210  0.08960188
  3.274549e-02  0.1263232  0.9211336  0.09040156
  3.764936e-02  0.1275324  0.9205351  0.09139279
  4.328761e-02  0.1290214  0.9197991  0.09261115
  4.977024e-02  0.1308283  0.9189101  0.09408119
  5.722368e-02  0.1330158  0.9178334  0.09587911
  6.579332e-02  0.1356200  0.9165564  0.09804399
  7.564633e-02  0.1387138  0.9150356  0.10063548
  8.697490e-02  0.1423210  0.9132671  0.10365698
  1.000000e-01  0.1465209  0.9111976  0.10718009
  1.149757e-01  0.1513120  0.9088398  0.11124324
  1.321941e-01  0.1567765  0.9061288  0.11590149
  1.519911e-01  0.1628775  0.9030968  0.12112912
  1.747528e-01  0.1696971  0.8996651  0.12698776
  2.009233e-01  0.1771493  0.8958995  0.13340871
  2.310130e-01  0.1853174  0.8916978  0.14044834
  2.656088e-01  0.1940616  0.8871586  0.14797845
  3.053856e-01  0.2034668  0.8821560  0.15602687
  3.511192e-01  0.2133348  0.8768295  0.16444914
  4.037017e-01  0.2237552  0.8710266  0.17329088
  4.641589e-01  0.2344738  0.8649369  0.18233732
  5.336699e-01  0.2455904  0.8583828  0.19165825
  6.135907e-01  0.2568040  0.8516140  0.20102781
  7.054802e-01  0.2682319  0.8444305  0.21057248
  8.111308e-01  0.2795432  0.8371551  0.22002436
  9.326033e-01  0.2908766  0.8295481  0.22946455
  1.072267e+00  0.3018907  0.8219941  0.23860783
  1.232847e+00  0.3127525  0.8142487  0.24762649
  1.417474e+00  0.3231279  0.8067264  0.25623745
  1.629751e+00  0.3332082  0.7991576  0.26457118
  1.873817e+00  0.3426839  0.7919688  0.27238956
  2.154435e+00  0.3517637  0.7848677  0.27986696
  2.477076e+00  0.3601737  0.7782639  0.28677239
  2.848036e+00  0.3681314  0.7718504  0.29329776
  3.274549e+00  0.3754039  0.7659986  0.29926155
  3.764936e+00  0.3822074  0.7603999  0.30482829
  4.328761e+00  0.3883513  0.7553751  0.30984318
  4.977024e+00  0.3940416  0.7506290  0.31448810
  5.722368e+00  0.3991267  0.7464272  0.31863716
  6.579332e+00  0.4037957  0.7424992  0.32244208
  7.564633e+00  0.4079310  0.7390616  0.32580772
  8.697490e+00  0.4117002  0.7358752  0.32887573
  1.000000e+01  0.4150136  0.7331118  0.33157274
  1.149757e+01  0.4180152  0.7305674  0.33401686
  1.321941e+01  0.4206378  0.7283766  0.33615249
  1.519911e+01  0.4230018  0.7263700  0.33807728
  1.747528e+01  0.4250570  0.7246521  0.33974864
  2.009233e+01  0.4269022  0.7230850  0.34124719
  2.310130e+01  0.4285000  0.7217491  0.34254381
  2.656088e+01  0.4299301  0.7205344  0.34370335
  3.053856e+01  0.4311646  0.7195024  0.34470425
  3.511192e+01  0.4322668  0.7185663  0.34559733
  4.037017e+01  0.4332159  0.7177731  0.34636608
  4.641589e+01  0.4340617  0.7170549  0.34705090
  5.336699e+01  0.4347886  0.7164475  0.34763924
  6.135907e+01  0.4354354  0.7158985  0.34816259
  7.054802e+01  0.4359905  0.7154352  0.34861180
  8.111308e+01  0.4364839  0.7150163  0.34901104
  9.326033e+01  0.4369069  0.7146629  0.34935317
  1.072267e+02  0.4372824  0.7143440  0.34965704
  1.232847e+02  0.4376042  0.7140753  0.34991749
  1.417474e+02  0.4378896  0.7138330  0.35014864
  1.629751e+02  0.4381340  0.7136289  0.35034645
  1.873817e+02  0.4383508  0.7134450  0.35052188
  2.154435e+02  0.4390209  0.7133272  0.35106476
  2.477076e+02  0.4397931        NaN  0.35168920
  2.848036e+02  0.4397931        NaN  0.35168920
  3.274549e+02  0.4397931        NaN  0.35168920
  3.764936e+02  0.4397931        NaN  0.35168920
  4.328761e+02  0.4397931        NaN  0.35168920
  4.977024e+02  0.4397931        NaN  0.35168920
  5.722368e+02  0.4397931        NaN  0.35168920
  6.579332e+02  0.4397931        NaN  0.35168920
  7.564633e+02  0.4397931        NaN  0.35168920
  8.697490e+02  0.4397931        NaN  0.35168920
  1.000000e+03  0.4397931        NaN  0.35168920

Tuning parameter 'alpha' was held constant at a value of 0
RMSE was used to select the optimal model using the smallest value.
The final values used for the model were alpha = 0 and lambda = 0.02154435.
ridgeLambda<-ridgeModel$bestTune$lambda
ridgePredictor<- setdiff(names(carbonTrainData),"CarbonEmission")
# alpha = 0 gives ridge (alpha = 1 would refit a lasso); the factor-coercion
# caveat from the lasso refit applies here too
ridgeFinalModel<-glmnet(as.matrix(carbonTrainData[,ridgePredictor]),carbonTrainData[,"CarbonEmission"],alpha = 0,lambda = ridgeLambda, family = "gaussian")
Warning: NAs introduced by coercion
plot(ridgeModel)
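The coercion warning arises because as.matrix() turns factor columns into character and then NA, while glmnet() needs a fully numeric matrix. A minimal corrected sketch (ridgeX and ridgeY are names introduced here) dummy-codes the factors with model.matrix() and uses alpha = 0 for the ridge penalty:

# numeric design matrix: factors become dummy columns; drop the intercept column
ridgeX <- model.matrix(CarbonEmission ~ ., data = carbonTrainData)[, -1]
ridgeY <- carbonTrainData$CarbonEmission

ridgeFinalModel <- glmnet(ridgeX, ridgeY, alpha = 0,   # alpha = 0 is the ridge penalty
                          lambda = ridgeLambda, family = "gaussian")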

set.seed(1)
# Note: preProc is not a trainControl() argument (R partially matches it to
# preProcOptions), so the "nzv" filter has no effect here; to apply it, pass
# preProcess = "nzv" to train() itself. Hence "No pre-processing" in the summary below.
enetModel<-train(CarbonEmission~.,
                 data = carbonTrainData,
                 method = "glmnet",
                 trControl = trainControl(method = "cv", number = 5, preProc = "nzv"),
                 tuneGrid = expand.grid(alpha = seq(0, 1, length = 10),
                                        lambda = 10^seq(-3, 1, length = 100)))
Warning: There were missing values in resampled performance measures.
enetModel
glmnet 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results across tuning parameters:

  alpha      lambda        RMSE       Rsquared   MAE       
  0.0000000   0.001000000  0.1241596  0.9222100  0.08865090
  [ 33 further rows with lambda up to 0.0215 omitted; all identical to the row above ]
  0.0000000   0.023644894  0.1243336  0.9221251  0.08878981
  0.0000000   0.025950242  0.1248007  0.9218916  0.08916443
  0.0000000   0.028480359  0.1253433  0.9216210  0.08960188
  0.0000000   0.031257158  0.1259718  0.9213084  0.09011437
  0.0000000   0.034304693  0.1266974  0.9209484  0.09070877
  0.0000000   0.037649358  0.1275324  0.9205351  0.09139279
  0.0000000   0.041320124  0.1284898  0.9200622  0.09217462
  0.0000000   0.045348785  0.1295836  0.9195229  0.09307200
  0.0000000   0.049770236  0.1308283  0.9189101  0.09408119
  0.0000000   0.054622772  0.1322389  0.9182166  0.09523806
  0.0000000   0.059948425  0.1338309  0.9174346  0.09655549
  0.0000000   0.065793322  0.1356200  0.9165564  0.09804399
  0.0000000   0.072208090  0.1376214  0.9155743  0.09971664
  0.0000000   0.079248290  0.1398503  0.9144804  0.10158536
  0.0000000   0.086974900  0.1423210  0.9132671  0.10365698
  0.0000000   0.095454846  0.1450470  0.9119271  0.10593910
  0.0000000   0.104761575  0.1480405  0.9104532  0.10846769
  0.0000000   0.114975700  0.1513120  0.9088398  0.11124324
  0.0000000   0.126185688  0.1548706  0.9070803  0.11427081
  0.0000000   0.138488637  0.1587242  0.9051676  0.11756418
  0.0000000   0.151991108  0.1628775  0.9030968  0.12112912
  0.0000000   0.166810054  0.1673331  0.9008642  0.12496092
  0.0000000   0.183073828  0.1720912  0.8984661  0.12905094
  0.0000000   0.200923300  0.1771493  0.8958995  0.13340871
  0.0000000   0.220513074  0.1825026  0.8931607  0.13802299
  0.0000000   0.242012826  0.1881435  0.8902477  0.14288382
  0.0000000   0.265608778  0.1940616  0.8871586  0.14797845
  0.0000000   0.291505306  0.2002439  0.8838925  0.15327021
  0.0000000   0.319926714  0.2066744  0.8804492  0.15876514
  0.0000000   0.351119173  0.2133348  0.8768295  0.16444914
  0.0000000   0.385352859  0.2202039  0.8730351  0.17028302
  0.0000000   0.422924287  0.2272587  0.8690693  0.17625470
  0.0000000   0.464158883  0.2344738  0.8649369  0.18233732
  0.0000000   0.509413801  0.2418221  0.8606442  0.18850337
  0.0000000   0.559081018  0.2492753  0.8561998  0.19473774
  0.0000000   0.613590727  0.2568040  0.8516140  0.20102781
  0.0000000   0.673415066  0.2643782  0.8468997  0.20735763
  0.0000000   0.739072203  0.2719680  0.8420750  0.21369430
  0.0000000   0.811130831  0.2795432  0.8371551  0.22002436
  0.0000000   0.890215085  0.2870737  0.8321512  0.22630021
  0.0000000   0.977009957  0.2945319  0.8270902  0.23250193
  0.0000000   1.072267222  0.3018907  0.8219941  0.23860783
  0.0000000   1.176811952  0.3091249  0.8168859  0.24461487
  0.0000000   1.291549665  0.3162111  0.8117888  0.25049901
  0.0000000   1.417474163  0.3231279  0.8067264  0.25623745
  0.0000000   1.555676144  0.3298564  0.8017212  0.26180204
  0.0000000   1.707352647  0.3363798  0.7967952  0.26719085
  0.0000000   1.873817423  0.3426839  0.7919688  0.27238956
  0.0000000   2.056512308  0.3487568  0.7872607  0.27739121
  0.0000000   2.257019720  0.3545891  0.7826876  0.28218926
  0.0000000   2.477076356  0.3601737  0.7782639  0.28677239
  0.0000000   2.718588243  0.3655059  0.7740017  0.29114498
  0.0000000   2.983647240  0.3705828  0.7699108  0.29530828
  0.0000000   3.274549163  0.3754039  0.7659986  0.29926155
  0.0000000   3.593813664  0.3799702  0.7622703  0.30299929
  0.0000000   3.944206059  0.3842846  0.7587287  0.30652481
  0.0000000   4.328761281  0.3883513  0.7553751  0.30984318
  0.0000000   4.750810162  0.3921759  0.7522089  0.31296488
  0.0000000   5.214008288  0.3957652  0.7492277  0.31589452
  0.0000000   5.722367659  0.3991267  0.7464272  0.31863716
  0.0000000   6.280291442  0.4022688  0.7438033  0.32119875
  0.0000000   6.892612104  0.4052005  0.7413502  0.32358524
  0.0000000   7.564633276  0.4079310  0.7390616  0.32580772
  0.0000000   8.302175681  0.4104702  0.7369306  0.32787458
  0.0000000   9.111627561  0.4128278  0.7349499  0.32979342
  0.0000000  10.000000000  0.4150136  0.7331118  0.33157274
  0.1111111   0.001000000  0.1217334  0.9234459  0.08677404
  0.1111111   0.001097499  0.1217334  0.9234459  0.08677404
  0.1111111   0.001204504  0.1217334  0.9234459  0.08677404
  0.1111111   0.001321941  0.1217334  0.9234459  0.08677404
  0.1111111   0.001450829  0.1217334  0.9234462  0.08677364
  0.1111111   0.001592283  0.1217397  0.9234421  0.08677424
  0.1111111   0.001747528  0.1217472  0.9234374  0.08677558
  0.1111111   0.001917910  0.1217557  0.9234320  0.08677780
  0.1111111   0.002104904  0.1217665  0.9234254  0.08678165
  0.1111111   0.002310130  0.1217774  0.9234192  0.08678479
  0.1111111   0.002535364  0.1217927  0.9234097  0.08679195
  0.1111111   0.002782559  0.1218112  0.9233983  0.08680125
  0.1111111   0.003053856  0.1218335  0.9233846  0.08681348
  0.1111111   0.003351603  0.1218601  0.9233682  0.08682921
  0.1111111   0.003678380  0.1218921  0.9233485  0.08684948
  0.1111111   0.004037017  0.1219305  0.9233250  0.08687637
  0.1111111   0.004430621  0.1219764  0.9232968  0.08690955
  0.1111111   0.004862602  0.1220311  0.9232633  0.08694945
  0.1111111   0.005336699  0.1220956  0.9232239  0.08699853
  0.1111111   0.005857021  0.1221727  0.9231779  0.08705854
  0.1111111   0.006428073  0.1222625  0.9231235  0.08712762
  0.1111111   0.007054802  0.1223716  0.9230582  0.08721088
  0.1111111   0.007742637  0.1225033  0.9229780  0.08731223
  0.1111111   0.008497534  0.1226601  0.9228823  0.08743698
  0.1111111   0.009326033  0.1228463  0.9227684  0.08758640
  0.1111111   0.010235310  0.1230668  0.9226335  0.08776182
  0.1111111   0.011233240  0.1233256  0.9224757  0.08796704
  0.1111111   0.012328467  0.1236361  0.9222828  0.08821292
  0.1111111   0.013530478  0.1239985  0.9220595  0.08850432
  0.1111111   0.014849683  0.1244299  0.9217901  0.08885240
  0.1111111   0.016297508  0.1249354  0.9214730  0.08927013
  0.1111111   0.017886495  0.1255325  0.9210947  0.08977170
  0.1111111   0.019630407  0.1262359  0.9206431  0.09036904
  0.1111111   0.021544347  0.1270514  0.9201159  0.09106724
  0.1111111   0.023644894  0.1280062  0.9194893  0.09188451
  0.1111111   0.025950242  0.1291143  0.9187547  0.09282390
  0.1111111   0.028480359  0.1303934  0.9178960  0.09391840
  0.1111111   0.031257158  0.1318723  0.9168851  0.09519223
  0.1111111   0.034304693  0.1335698  0.9157020  0.09664438
  0.1111111   0.037649358  0.1355141  0.9143136  0.09831634
  0.1111111   0.041320124  0.1377232  0.9127111  0.10017116
  0.1111111   0.045348785  0.1402113  0.9108721  0.10224274
  0.1111111   0.049770236  0.1430276  0.9087235  0.10459590
  0.1111111   0.054622772  0.1461284  0.9063414  0.10718878
  0.1111111   0.059948425  0.1494363  0.9038941  0.10994517
  0.1111111   0.065793322  0.1529902  0.9013442  0.11292117
  0.1111111   0.072208090  0.1567623  0.8987544  0.11610390
  0.1111111   0.079248290  0.1608681  0.8959800  0.11954882
  0.1111111   0.086974900  0.1653875  0.8928412  0.12334553
  0.1111111   0.095454846  0.1703359  0.8893017  0.12748123
  0.1111111   0.104761575  0.1756553  0.8854898  0.13194740
  0.1111111   0.114975700  0.1812419  0.8816911  0.13661277
  0.1111111   0.126185688  0.1872992  0.8774701  0.14171717
  0.1111111   0.138488637  0.1938499  0.8727256  0.14722242
  0.1111111   0.151991108  0.2009144  0.8673238  0.15314502
  0.1111111   0.166810054  0.2084710  0.8611874  0.15946882
  0.1111111   0.183073828  0.2165904  0.8540000  0.16627763
  0.1111111   0.200923300  0.2253002  0.8454287  0.17358788
  0.1111111   0.220513074  0.2344942  0.8355983  0.18126942
  0.1111111   0.242012826  0.2438593  0.8255182  0.18905006
  0.1111111   0.265608778  0.2535111  0.8147922  0.19704125
  0.1111111   0.291505306  0.2634364  0.8032475  0.20522311
  0.1111111   0.319926714  0.2737713  0.7898724  0.21375087
  0.1111111   0.351119173  0.2842888  0.7753295  0.22246859
  0.1111111   0.385352859  0.2949711  0.7595295  0.23134406
  0.1111111   0.422924287  0.3060137  0.7405776  0.24047033
  0.1111111   0.464158883  0.3174432  0.7170064  0.24990101
  0.1111111   0.509413801  0.3291159  0.6882222  0.25951702
  0.1111111   0.559081018  0.3405148  0.6571255  0.26895359
  0.1111111   0.613590727  0.3517377  0.6222571  0.27825887
  0.1111111   0.673415066  0.3618434  0.5940626  0.28668262
  0.1111111   0.739072203  0.3710416  0.5715066  0.29438372
  0.1111111   0.811130831  0.3793860  0.5563613  0.30128810
  0.1111111   0.890215085  0.3874181  0.5409958  0.30792701
  0.1111111   0.977009957  0.3950869  0.5281318  0.31427023
  0.1111111   1.072267222  0.4024619  0.5182535  0.32037846
  0.1111111   1.176811952  0.4097467  0.5038059  0.32646378
  0.1111111   1.291549665  0.4165295  0.4890961  0.33224260
  0.1111111   1.417474163  0.4231500  0.4611175  0.33789654
  0.1111111   1.555676144  0.4288538  0.4430035  0.34277159
  0.1111111   1.707352647  0.4341464  0.4012099  0.34727384
  0.1111111   1.873817423  0.4377089  0.2710946  0.35022545
  0.1111111   2.056512308  0.4397661  0.2502579  0.35167019
  0.1111111   2.257019720  0.4397931        NaN  0.35168920
  [ 16 larger lambda rows omitted; all identical to the row above (Rsquared = NaN) ]
 [ reached getOption("max.print") -- omitted 800 rows ]

RMSE was used to select the optimal model using the smallest value.
The final values used for the model were alpha = 0.1111111 and lambda = 0.001321941.
enetModel$bestTune
enetLambda<-enetModel$bestTune$lambda
enetAlpha<-enetModel$bestTune$alpha

enetPredictor<-setdiff(names(carbonTrainData),"CarbonEmission")

# the same as.matrix() factor-coercion issue as in the ridge refit applies here
enetFinalModel<-glmnet(as.matrix(carbonTrainData[,enetPredictor]),carbonTrainData[,"CarbonEmission"], alpha = enetAlpha,lambda = enetLambda, family = "gaussian")
Warning: NAs introduced by coercion
enetFinalModel

Call:  glmnet(x = as.matrix(carbonTrainData[, enetPredictor]), y = carbonTrainData[,      "CarbonEmission"], family = "gaussian", alpha = enetAlpha,      lambda = enetLambda) 

  Df  %Dev   Lambda
1 15 38.87 0.001322
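For parity with the models scored on the test set below, the tuned elastic net can be evaluated through the caret object itself (enetPred is a name introduced here); a minimal sketch:

enetPred<-predict(enetModel,newdata = carbonTestData)
MAE(carbonTestData$CarbonEmission,enetPred)
cor(carbonTestData$CarbonEmission,enetPred)^2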

#Random Forest Model

library(randomForest)
randomForest 4.7-1.1
Type rfNews() to see new features/changes/bug fixes.

Attaching package: ‘randomForest’

The following object is masked from ‘package:dplyr’:

    combine

The following object is masked from ‘package:ggplot2’:

    margin
set.seed(1)
randomForestModel<-randomForest(CarbonEmission~.,data = carbonTrainData)
randomForestModel

Call:
 randomForest(formula = CarbonEmission ~ ., data = carbonTrainData) 
               Type of random forest: regression
                     Number of trees: 500
No. of variables tried at each split: 8

          Mean of squared residuals: 0.01633518
                    % Var explained: 91.56
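As a rough consistency check, the OOB error above can be put on the same scale as the cross-validated RMSE reported below; randomForest stores the running OOB MSE in the mse component:

sqrt(tail(randomForestModel$mse,1)) # OOB RMSE, about 0.128, close to the CV RMSE below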
mRf<-train(CarbonEmission~.,
           data=carbonTrainData,
           method="rf",
           trControl=trainControl(method = "cv", number =5)
           )
mRf
Random Forest 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results across tuning parameters:

  mtry  RMSE       Rsquared   MAE      
   2    0.2675949  0.8091212  0.2085785
  23    0.1383621  0.9051976  0.1068202
  45    0.1421811  0.8973471  0.1099139

RMSE was used to select the optimal model using the smallest value.
The final value used for the model was mtry = 23.
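caret's default grid tried only three mtry values, so the selected 23 may be coarse; a hedged follow-up search around it (mRfTuned is a new name) could look like:

set.seed(1)
mRfTuned<-train(CarbonEmission~.,
                data=carbonTrainData,
                method="rf",
                tuneGrid=expand.grid(mtry=seq(11,35,by=4)), # finer grid around mtry = 23
                trControl=trainControl(method = "cv", number =5)
                )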
varImp(mRf)
rf variable importance

  only 20 most important variables shown (out of 45)
rfPred<-predict(mRf,newdata = carbonTestData)

MAE(carbonTestData$CarbonEmission,rfPred)
[1] 0.1045264
rmse(carbonTestData$CarbonEmission,rfPred)
[1] 0.00120078
cor(carbonTestData$CarbonEmission,rfPred)^2
[1] 0.9072645
plot(carbonTestData$CarbonEmission,rfPred)
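The rmse() called here is a function defined earlier in the script (mltools later reports its own rmse as masked by the global environment), and its value is hard to reconcile with the MAE above, since a true RMSE can never be smaller than the MAE on the same predictions. Computing it explicitly avoids the ambiguity:

sqrt(mean((carbonTestData$CarbonEmission-rfPred)^2)) # RMSE computed directly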

#GBM Model

set.seed(1)

grBoostedTree<-train(
  CarbonEmission~.,
  data = carbonTrainData,
  method="gbm",
  trControl=trainControl(method = "cv",number = 5)
)
Iter   TrainDeviance   ValidDeviance   StepSize   Improve
     1        0.1840             nan     0.1000    0.0083
     2        0.1764             nan     0.1000    0.0078
     3        0.1696             nan     0.1000    0.0067
     4        0.1630             nan     0.1000    0.0063
     5        0.1574             nan     0.1000    0.0058
     6        0.1521             nan     0.1000    0.0052
     7        0.1475             nan     0.1000    0.0047
     8        0.1434             nan     0.1000    0.0039
     9        0.1392             nan     0.1000    0.0041
    10        0.1357             nan     0.1000    0.0032
    20        0.1098             nan     0.1000    0.0017
    40        0.0855             nan     0.1000    0.0009
    60        0.0696             nan     0.1000    0.0006
    80        0.0584             nan     0.1000    0.0005
   100        0.0502             nan     0.1000    0.0004
   120        0.0441             nan     0.1000    0.0002
   140        0.0395             nan     0.1000    0.0002
   150        0.0375             nan     0.1000    0.0002

[ equivalent per-iteration boosting logs for the remaining cross-validation folds and tuning settings omitted; passing verbose = FALSE to train() would suppress them ]
grBoostedTree
Stochastic Gradient Boosting 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results across tuning parameters:

  interaction.depth  n.trees  RMSE       Rsquared   MAE       
  1                   50      0.2799859  0.6817099  0.21734673
  1                  100      0.2268772  0.7921416  0.17361732
  1                  150      0.1962693  0.8366385  0.14863782
  2                   50      0.2171188  0.8190082  0.16598681
  2                  100      0.1638148  0.8868431  0.12368291
  2                  150      0.1371742  0.9154801  0.10233794
  3                   50      0.1894213  0.8550887  0.14434186
  3                  100      0.1391537  0.9160350  0.10460569
  3                  150      0.1130848  0.9406167  0.08385162

Tuning parameter 'shrinkage' was held constant at a value of 0.1
Tuning parameter 'n.minobsinnode' was held constant at a value of 10
RMSE was used to select the optimal model using the smallest value.
The final values used for the model were n.trees = 150, interaction.depth = 3, shrinkage = 0.1 and n.minobsinnode = 10.
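Both chosen values sit on the edge of caret's default grid, so a wider hedged grid may do better still; gbmGrid and grBoostedTreeWide are names introduced here, and verbose = FALSE silences the per-iteration log shown above:

gbmGrid<-expand.grid(n.trees=c(150,300,500),
                     interaction.depth=c(3,5),
                     shrinkage=0.1,
                     n.minobsinnode=10)
set.seed(1)
grBoostedTreeWide<-train(CarbonEmission~.,
                         data = carbonTrainData,
                         method="gbm",
                         tuneGrid=gbmGrid,
                         trControl=trainControl(method = "cv",number = 5),
                         verbose=FALSE)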
gbmPred<-predict(grBoostedTree, carbonTestData)
MAE(carbonTestData$CarbonEmission,gbmPred)
[1] 0.08074864
rmse(carbonTestData$CarbonEmission,gbmPred)
[1] 0.0002656894
cor(carbonTestData$CarbonEmission,gbmPred)^2
[1] 0.9431872
plot(carbonTestData$CarbonEmission,gbmPred)

#SVM Linear Model

set.seed(1)

svmLinear<-train(
  CarbonEmission~.,
  data = carbonTrainData,
  method="svmLinear",
  # preProc is not a trainControl() argument (R partially matches it to
  # preProcOptions) and is ignored here, as "No pre-processing" below confirms;
  # centring and scaling belong in train(preProcess = c("center","scale"))
  trControl=trainControl(method = "cv",number = 5, preProc=c("center","scale"))
)
svmLinear
Support Vector Machines with Linear Kernel 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results:

  RMSE       Rsquared   MAE       
  0.1223375  0.9230945  0.08612926

Tuning parameter 'C' was held constant at a value of 1
svmPred<-predict(svmLinear,carbonTestData)

plot(svmPred,carbonTestData$CarbonEmission)

#SVM Radial Model

set.seed(1)

svmRadial<-train(
  CarbonEmission~.,
  data = carbonTrainData,
  method="svmRadial",
  # as above, preProc inside trainControl() is ignored; centring/scaling
  # would need preProcess = c("center","scale") in train() itself
  trControl=trainControl(method = "cv",number = 5, preProc=c("center","scale"))
)
svmRadial
Support Vector Machines with Radial Basis Function Kernel 

8001 samples
  26 predictor

No pre-processing
Resampling: Cross-Validated (5 fold) 
Summary of sample sizes: 6401, 6401, 6401, 6400, 6401 
Resampling results across tuning parameters:

  C     RMSE        Rsquared   MAE       
  0.25  0.08070183  0.9684766  0.05543867
  0.50  0.06813583  0.9769891  0.04773321
  1.00  0.06065717  0.9813954  0.04355294

Tuning parameter 'sigma' was held constant at a value of 0.01193687
RMSE was used to select the optimal model using the smallest value.
The final values used for the model were sigma = 0.01193687 and C = 1.
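C = 1 is the largest value in the default grid, so the search may be stopping short; a hedged re-run with a longer tuneLength (svmRadialWide is a new name) doubles C at each step and places the centring/scaling where caret actually reads it:

set.seed(1)
svmRadialWide<-train(CarbonEmission~.,
                     data = carbonTrainData,
                     method="svmRadial",
                     preProcess=c("center","scale"),
                     tuneLength=6, # default grid doubles C each step, up to C = 8
                     trControl=trainControl(method = "cv",number = 5))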
svmRadialPred<-predict(svmRadial,carbonTestData)

plot(svmRadialPred,carbonTestData$CarbonEmission)
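Since the radial SVM is singled out in the comparison below, its held-out performance deserves numbers as well as a plot; a short sketch in the style of the earlier evaluations:

MAE(carbonTestData$CarbonEmission,svmRadialPred)
sqrt(mean((carbonTestData$CarbonEmission-svmRadialPred)^2)) # explicit RMSE
cor(carbonTestData$CarbonEmission,svmRadialPred)^2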

#Comparing models

compare<-resamples(list(KNN=knnModel, LIN=lmModel, stepWise=stepwiseModel,
                        Lasso=lassoModel, Ridge=ridgeModel, Enet=enetModel,
                        RF=mRf, GBM=grBoostedTree, SVML=svmLinear, SVMR=svmRadial))
summary(compare) # SVM with the radial kernel clearly outperforms every other model on MAE, RMSE and R-squared

Call:
summary.resamples(object = compare)

Models: KNN, LIN, stepWise, Lasso, Ridge, Enet, RF, GBM, SVML, SVMR 
Number of resamples: 5 

MAE 
               Min.    1st Qu.     Median       Mean    3rd Qu.       Max. NA's
KNN      0.29355718 0.30011897 0.30358891 0.30104742 0.30359972 0.30437235    0
LIN      0.08468189 0.08653726 0.08668226 0.08674322 0.08771081 0.08810388    0
stepWise 0.27076896 0.27236187 0.27267260 0.27275357 0.27295504 0.27500935    0
Lasso    0.08263733 0.08452922 0.08689541 0.08693488 0.08991372 0.09069875    0
Ridge    0.08461479 0.08660820 0.08863896 0.08865090 0.09132776 0.09206482    0
Enet     0.08278867 0.08412890 0.08658334 0.08677404 0.08993674 0.09043257    0
RF       0.10408932 0.10538108 0.10750605 0.10682021 0.10836087 0.10876372    0
GBM      0.07970661 0.08311406 0.08431983 0.08385162 0.08521232 0.08690528    0
SVML     0.08208710 0.08349344 0.08672664 0.08612926 0.08894605 0.08939306    0
SVMR     0.04203058 0.04268964 0.04371276 0.04355294 0.04405078 0.04528092    0

RMSE 
               Min.    1st Qu.     Median       Mean    3rd Qu.      Max. NA's
KNN      0.37295011 0.37736743 0.37851215 0.37897650 0.37913018 0.3869226    0
LIN      0.11743009 0.12058876 0.12318123 0.12169555 0.12340748 0.1238702    0
stepWise 0.34358303 0.34484586 0.34496809 0.34607902 0.34713797 0.3498602    0
Lasso    0.11662100 0.11703960 0.12364901 0.12198581 0.12624277 0.1263767    0
Ridge    0.11907983 0.11994269 0.12566449 0.12415964 0.12788642 0.1282248    0
Enet     0.11604668 0.11694091 0.12328642 0.12173339 0.12617744 0.1262155    0
RF       0.13408157 0.13614576 0.13894945 0.13836211 0.14047841 0.1421553    0
GBM      0.10685459 0.11180062 0.11338918 0.11308477 0.11588839 0.1174911    0
SVML     0.11628786 0.11705824 0.12519442 0.12233751 0.12626695 0.1268801    0
SVMR     0.05776811 0.05964871 0.06149819 0.06065717 0.06175535 0.0626155    0

Rsquared 
              Min.   1st Qu.    Median      Mean   3rd Qu.      Max. NA's
KNN      0.2543177 0.2631183 0.2650880 0.2674213 0.2767798 0.2778026    0
LIN      0.9185705 0.9216487 0.9246806 0.9235284 0.9253275 0.9274147    0
stepWise 0.3614847 0.3715791 0.3780496 0.3807424 0.3938578 0.3987405    0
Lasso    0.9170290 0.9192558 0.9230649 0.9232209 0.9279729 0.9287817    0
Ridge    0.9155089 0.9187241 0.9226270 0.9222100 0.9266525 0.9275377    0
Enet     0.9173033 0.9192842 0.9233576 0.9234459 0.9285877 0.9286968    0
RF       0.9002318 0.9022657 0.9053106 0.9051976 0.9090824 0.9090975    0
GBM      0.9376708 0.9388972 0.9390649 0.9406167 0.9404415 0.9470088    0
SVML     0.9174402 0.9185159 0.9219757 0.9230945 0.9287006 0.9288401    0
SVMR     0.9805681 0.9808349 0.9812240 0.9813954 0.9815897 0.9827600    0
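resamples objects also plot directly, which makes the same comparison easier to scan:

bwplot(compare,metric = "RMSE")      # fold-by-fold RMSE distribution per model
dotplot(compare,metric = "Rsquared")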

#Neural Network Preprocessing

library(caret)
carbonInd<-createDataPartition(carbonTrainData$CarbonEmission,p=0.9,list = FALSE)
carbonIndex<-which(names(carbonTrainData)=='CarbonEmission')

carbonTrainingData<-carbonTrainData[carbonInd,-carbonIndex]
str(carbonTrainingData)
'data.frame':   7202 obs. of  26 variables:
 $ Body.Type                    : Factor w/ 4 levels "normal","obese",..: 2 3 2 3 3 4 1 2 4 4 ...
 $ Sex                          : Factor w/ 2 levels "female","male": 1 2 1 2 2 1 1 2 1 1 ...
 $ Diet                         : Factor w/ 4 levels "omnivore","pescatarian",..: 4 1 4 4 1 2 4 4 1 3 ...
 $ How.Often.Shower             : Factor w/ 4 levels "daily","less frequently",..: 2 3 1 2 1 1 3 3 4 2 ...
 $ Heating.Energy.Source        : Factor w/ 4 levels "coal","electricity",..: 3 4 1 4 4 4 4 1 1 2 ...
 $ Transport                    : Factor w/ 3 levels "private","public",..: 3 1 1 2 2 2 2 3 3 1 ...
 $ Vehicle.Type                 : Factor w/ 6 levels "diesel","electric",..: 3 6 1 3 3 3 3 3 3 5 ...
 $ Social.Activity              : Factor w/ 3 levels "never","often",..: 2 1 2 3 1 2 1 1 2 3 ...
 $ Monthly.Grocery.Bill         : num  114 138 266 144 200 135 146 111 114 111 ...
 $ Frequency.of.Traveling.by.Air: Factor w/ 4 levels "frequently","never",..: 3 2 4 1 1 3 2 4 3 3 ...
 $ Vehicle.Monthly.Distance.Km  : num  9 2472 8457 658 1376 ...
 $ Waste.Bag.Size               : Factor w/ 4 levels "extra large",..: 1 4 2 2 3 1 1 3 2 2 ...
 $ Waste.Bag.Weekly.Count       : num  3 1 1 1 3 1 4 5 3 6 ...
 $ How.Long.TV.PC.Daily.Hour    : num  9 14 3 22 3 8 12 9 18 13 ...
 $ How.Many.New.Clothes.Monthly : num  38 47 5 18 31 23 27 4 27 16 ...
 $ How.Long.Internet.Daily.Hour : num  5 6 6 9 15 18 21 4 4 10 ...
 $ Energy.efficiency            : Factor w/ 3 levels "No","Sometimes",..: 1 2 3 2 3 2 1 2 3 2 ...
 $ Metal                        : num  1 1 0 1 0 0 0 0 0 1 ...
 $ Paper                        : num  0 0 1 1 0 0 1 0 0 0 ...
 $ Plastic                      : num  0 0 0 0 0 0 1 0 1 1 ...
 $ Glass                        : num  0 0 0 1 1 1 0 0 0 1 ...
 $ Stove                        : num  1 0 0 1 0 0 1 1 1 1 ...
 $ Oven                         : num  0 1 1 1 0 0 0 1 0 1 ...
 $ Microwave                    : num  1 1 0 1 1 1 1 1 0 1 ...
 $ Grill                        : num  0 0 0 0 1 1 0 0 0 1 ...
 $ Airfryer                     : num  0 0 0 0 1 1 0 0 0 1 ...
carbonTrainingLabels<-carbonTrainData[carbonInd,carbonIndex]
str(carbonTrainingLabels)
 num [1:7202] 7.55 7.86 8.46 7.41 7.82 ...
carbonValidationData<-carbonTrainData[-carbonInd,-carbonIndex]
carbonValidationData

carbonValidationLabels<-carbonTrainData[-carbonInd,carbonIndex]
str(carbonValidationLabels)
 num [1:799] 7.71 7.75 7.09 7.41 7.51 ...
carbonTestingData<-carbonTestData[,-carbonIndex]
carbonTestingData

carbonTestingLabels<-carbonTestData[,carbonIndex]
str(carbonTestingLabels)
 num [1:1999] 6.98 7.51 7.11 7.31 7.48 ...
dim(carbonTrainingData)
[1] 7202   26
dim(carbonTestingData)
[1] 1999   26

#Scaling numeric variables and one-hot encoding categorical variables

library(mltools)

Attaching package: ‘mltools’

The following object is masked _by_ ‘.GlobalEnv’:

    rmse

The following object is masked from ‘package:tidyr’:

    replace_na
library(data.table)
data.table 1.15.4 using 1 threads (see ?getDTthreads).  Latest news: r-datatable.com
**********
This installation of data.table has not detected OpenMP support. It should still work but in single-threaded mode.
This is a Mac. Please read https://mac.r-project.org/openmp/. Please engage with Apple and ask them for support. Check r-datatable.com for updates, and our Mac instructions here: https://github.com/Rdatatable/data.table/wiki/Installation. After several years of many reports of installation problems on Mac, it's time to gingerly point out that there have been no similar problems on Windows or Linux.
**********

Attaching package: ‘data.table’

The following objects are masked from ‘package:lubridate’:

    hour, isoweek, mday, minute, month, quarter, second, wday, week, yday, year

The following objects are masked from ‘package:dplyr’:

    between, first, last

The following object is masked from ‘package:purrr’:

    transpose
numericCols<-c("Monthly.Grocery.Bill","Vehicle.Monthly.Distance.Km","Waste.Bag.Weekly.Count",
               "How.Long.TV.PC.Daily.Hour","How.Many.New.Clothes.Monthly","How.Long.Internet.Daily.Hour",
               "Metal","Paper","Plastic","Glass","Stove","Oven","Microwave","Grill","Airfryer")

categoricalCols<-c("Body.Type","Sex","Diet","How.Often.Shower","Heating.Energy.Source","Transport","Vehicle.Type","Social.Activity",
                   "Frequency.of.Traveling.by.Air","Waste.Bag.Size","Energy.efficiency")

carbonTrainingDataNew<-scale(carbonTrainingData[,numericCols])
colMeanTrain<-attr(carbonTrainingDataNew,"scaled:center")
colStddevsTrain<-attr(carbonTrainingDataNew,"scaled:scale")


# reuse the training-set means and SDs so the validation and test splits are
# transformed identically (scaling them with their own statistics would leak)
carbonTrainingData[,numericCols]<-carbonTrainingDataNew
carbonValidationData[,numericCols]<-scale(carbonValidationData[,numericCols],center = colMeanTrain,scale = colStddevsTrain)
carbonTestingData[,numericCols]<-scale(carbonTestingData[,numericCols],center = colMeanTrain,scale = colStddevsTrain)

carbonTrainingTable<-as.data.table(carbonTrainingData)
carbonValidationTable<-as.data.table(carbonValidationData)
carbonTestingTable<-as.data.table(carbonTestingData)

carbonTrainingOneHot<-one_hot(carbonTrainingTable,naCols=FALSE,dropCols=TRUE,dropUnusedLevels=TRUE)
carbonTrainingOneHot

carbonValidationOneHot<-one_hot(carbonValidationTable,naCols=FALSE,dropCols=TRUE,dropUnusedLevels=TRUE)
carbonValidationOneHot

carbonTestingOneHot<-one_hot(carbonTestingTable,naCols=FALSE,dropCols=TRUE,dropUnusedLevels=TRUE)
carbonTestingOneHot

# one_hot() keeps the numeric columns alongside the new dummy columns, so the
# encoded tables are already complete feature matrices; cbind-ing numericCols
# back in would duplicate those 15 columns
carbonTrainingFinal<-as.data.frame(carbonTrainingOneHot)
carbonTrainingFinal

carbonValidationFinal<-as.data.frame(carbonValidationOneHot)
carbonValidationFinal

carbonTestingFinal<-as.data.frame(carbonTestingOneHot)
carbonTestingFinal
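Because one_hot() was applied to each split separately with dropUnusedLevels = TRUE, a factor level absent from the smaller validation or test split would silently yield mismatched columns; a cheap guard before modelling:

stopifnot(identical(names(carbonTrainingFinal),names(carbonValidationFinal)),
          identical(names(carbonTrainingFinal),names(carbonTestingFinal)))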
library(keras)

model<-keras_model_sequential()%>%
  layer_dense(units = 32,activation = "relu",input_shape = dim(carbonTrainingFinal)[2])%>%
  layer_dropout(rate=0.3)%>%
  layer_dense(units = 32,activation = "relu")%>%
  layer_dropout(rate=0.3)%>%
  layer_dense(units = 16,activation = "relu")%>%
  layer_dropout(rate=0.3)%>%
  layer_dense(units = 1)
/Users/angadsingh/.virtualenvs/r-tensorflow/lib/python3.9/site-packages/urllib3/__init__.py:35: NotOpenSSLWarning: urllib3 v2 only supports OpenSSL 1.1.1+, currently the 'ssl' module is compiled with 'LibreSSL 2.8.3'. See: https://github.com/urllib3/urllib3/issues/3020
  warnings.warn(
2024-05-06 09:54:21.056610: I metal_plugin/src/device/metal_device.cc:1154] Metal device set to: Apple M2 Pro
2024-05-06 09:54:21.056636: I metal_plugin/src/device/metal_device.cc:296] systemMemory: 16.00 GB
2024-05-06 09:54:21.056650: I metal_plugin/src/device/metal_device.cc:313] maxCacheSize: 5.33 GB
2024-05-06 09:54:21.056883: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:303] Could not identify NUMA node of platform GPU ID 0, defaulting to 0. Your kernel may not have been built with NUMA support.
2024-05-06 09:54:21.056910: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:269] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 0 MB memory) -> physical PluggableDevice (device: 0, name: METAL, pci bus id: <undefined>)
model %>% compile(
  loss="mse",
  optimizer=optimizer_adam(lr=0.001) # `lr` is deprecated in newer Keras; `learning_rate` is the current name (hence the warnings below)
)
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.
history<-model %>% fit(as.matrix(carbonTrainingFinal),
                       carbonTrainingLabels,
                       batch_size=50,
                       epochs=20,
                       validation_data=list(as.matrix(carbonValidationFinal),carbonValidationLabels)
                         )
Epoch 1/20
2024-05-06 09:54:21.595932: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
145/145 [==============================] - 2s 10ms/step - loss: 24.3173 - val_loss: 1.5259
Epoch 2/20
145/145 [==============================] - 1s 6ms/step - loss: 6.8839 - val_loss: 0.9495
Epoch 3/20
145/145 [==============================] - 1s 6ms/step - loss: 4.5351 - val_loss: 0.9392
Epoch 4/20
145/145 [==============================] - 1s 6ms/step - loss: 3.0253 - val_loss: 0.7579
Epoch 5/20
145/145 [==============================] - 1s 6ms/step - loss: 2.2719 - val_loss: 0.6862
Epoch 6/20
145/145 [==============================] - 1s 6ms/step - loss: 1.7592 - val_loss: 0.4138
Epoch 7/20
145/145 [==============================] - 1s 6ms/step - loss: 1.3315 - val_loss: 0.2116
Epoch 8/20
145/145 [==============================] - 1s 6ms/step - loss: 0.9829 - val_loss: 0.1225
Epoch 9/20
145/145 [==============================] - 1s 6ms/step - loss: 0.8132 - val_loss: 0.1366
Epoch 10/20
145/145 [==============================] - 1s 6ms/step - loss: 0.6825 - val_loss: 0.0467
Epoch 11/20
145/145 [==============================] - 1s 6ms/step - loss: 0.5537 - val_loss: 0.0922
Epoch 12/20
120/145 [=======================>......] - ETA: 0s - loss: 0.5026
131/145 [==========================>...] - ETA: 0s - loss: 0.4971
142/145 [============================>.] - ETA: 0s - loss: 0.4918
145/145 [==============================] - 1s 5ms/step - loss: 0.4909

145/145 [==============================] - 1s 6ms/step - loss: 0.4909 - val_loss: 0.0450
Epoch 13/20

  1/145 [..............................] - ETA: 0s - loss: 0.6261
 11/145 [=>............................] - ETA: 0s - loss: 0.4821
 22/145 [===>..........................] - ETA: 0s - loss: 0.4862
 33/145 [=====>........................] - ETA: 0s - loss: 0.5095
 44/145 [========>.....................] - ETA: 0s - loss: 0.4789
 54/145 [==========>...................] - ETA: 0s - loss: 0.4762
 65/145 [============>.................] - ETA: 0s - loss: 0.4710
 76/145 [==============>...............] - ETA: 0s - loss: 0.4722
 88/145 [=================>............] - ETA: 0s - loss: 0.4706
 99/145 [===================>..........] - ETA: 0s - loss: 0.4628
110/145 [=====================>........] - ETA: 0s - loss: 0.4667
121/145 [========================>.....] - ETA: 0s - loss: 0.4659
132/145 [==========================>...] - ETA: 0s - loss: 0.4608
143/145 [============================>.] - ETA: 0s - loss: 0.4581
145/145 [==============================] - 1s 5ms/step - loss: 0.4568

145/145 [==============================] - 1s 6ms/step - loss: 0.4568 - val_loss: 0.0911
Epoch 14/20

  1/145 [..............................] - ETA: 0s - loss: 0.4953
 10/145 [=>............................] - ETA: 0s - loss: 0.4858
 21/145 [===>..........................] - ETA: 0s - loss: 0.4328
 32/145 [=====>........................] - ETA: 0s - loss: 0.4474
 43/145 [=======>......................] - ETA: 0s - loss: 0.4513
 54/145 [==========>...................] - ETA: 0s - loss: 0.4518
 65/145 [============>.................] - ETA: 0s - loss: 0.4367
 76/145 [==============>...............] - ETA: 0s - loss: 0.4293
 87/145 [=================>............] - ETA: 0s - loss: 0.4260
 98/145 [===================>..........] - ETA: 0s - loss: 0.4292
109/145 [=====================>........] - ETA: 0s - loss: 0.4227
120/145 [=======================>......] - ETA: 0s - loss: 0.4242
131/145 [==========================>...] - ETA: 0s - loss: 0.4206
142/145 [============================>.] - ETA: 0s - loss: 0.4148
145/145 [==============================] - 1s 5ms/step - loss: 0.4133

145/145 [==============================] - 1s 6ms/step - loss: 0.4133 - val_loss: 0.0772
Epoch 15/20

  1/145 [..............................] - ETA: 1s - loss: 0.3225
 11/145 [=>............................] - ETA: 0s - loss: 0.4031
 22/145 [===>..........................] - ETA: 0s - loss: 0.4304
 34/145 [======>.......................] - ETA: 0s - loss: 0.3984
 45/145 [========>.....................] - ETA: 0s - loss: 0.3968
 57/145 [==========>...................] - ETA: 0s - loss: 0.3994
 69/145 [=============>................] - ETA: 0s - loss: 0.3936
 80/145 [===============>..............] - ETA: 0s - loss: 0.3884
 91/145 [=================>............] - ETA: 0s - loss: 0.3848
102/145 [====================>.........] - ETA: 0s - loss: 0.3820
113/145 [======================>.......] - ETA: 0s - loss: 0.3771
124/145 [========================>.....] - ETA: 0s - loss: 0.3761
135/145 [==========================>...] - ETA: 0s - loss: 0.3766
145/145 [==============================] - 1s 5ms/step - loss: 0.3792

145/145 [==============================] - 1s 6ms/step - loss: 0.3792 - val_loss: 0.0515
Epoch 16/20

  1/145 [..............................] - ETA: 0s - loss: 0.4333
  7/145 [>.............................] - ETA: 1s - loss: 0.3213
 17/145 [==>...........................] - ETA: 0s - loss: 0.3330
 28/145 [====>.........................] - ETA: 0s - loss: 0.3366
 39/145 [=======>......................] - ETA: 0s - loss: 0.3336
 50/145 [=========>....................] - ETA: 0s - loss: 0.3355
 61/145 [===========>..................] - ETA: 0s - loss: 0.3441
 72/145 [=============>................] - ETA: 0s - loss: 0.3483
 83/145 [================>.............] - ETA: 0s - loss: 0.3544
 94/145 [==================>...........] - ETA: 0s - loss: 0.3546
105/145 [====================>.........] - ETA: 0s - loss: 0.3547
116/145 [=======================>......] - ETA: 0s - loss: 0.3565
127/145 [=========================>....] - ETA: 0s - loss: 0.3556
138/145 [===========================>..] - ETA: 0s - loss: 0.3537
145/145 [==============================] - 1s 5ms/step - loss: 0.3538

145/145 [==============================] - 1s 6ms/step - loss: 0.3538 - val_loss: 0.0277
Epoch 17/20

  1/145 [..............................] - ETA: 1s - loss: 0.4379
 10/145 [=>............................] - ETA: 0s - loss: 0.3668
 21/145 [===>..........................] - ETA: 0s - loss: 0.3373
 32/145 [=====>........................] - ETA: 0s - loss: 0.3471
 43/145 [=======>......................] - ETA: 0s - loss: 0.3520
 54/145 [==========>...................] - ETA: 0s - loss: 0.3575
 65/145 [============>.................] - ETA: 0s - loss: 0.3522
 76/145 [==============>...............] - ETA: 0s - loss: 0.3496
 88/145 [=================>............] - ETA: 0s - loss: 0.3481
 99/145 [===================>..........] - ETA: 0s - loss: 0.3508
110/145 [=====================>........] - ETA: 0s - loss: 0.3473
121/145 [========================>.....] - ETA: 0s - loss: 0.3453
133/145 [==========================>...] - ETA: 0s - loss: 0.3417
144/145 [============================>.] - ETA: 0s - loss: 0.3377
145/145 [==============================] - 1s 5ms/step - loss: 0.3376

145/145 [==============================] - 1s 6ms/step - loss: 0.3376 - val_loss: 0.0400
Epoch 18/20

  1/145 [..............................] - ETA: 0s - loss: 0.2621
 11/145 [=>............................] - ETA: 0s - loss: 0.3236
 22/145 [===>..........................] - ETA: 0s - loss: 0.3026
 33/145 [=====>........................] - ETA: 0s - loss: 0.3158
 44/145 [========>.....................] - ETA: 0s - loss: 0.3060
 52/145 [=========>....................] - ETA: 0s - loss: 0.3050
 63/145 [============>.................] - ETA: 0s - loss: 0.3010
 74/145 [==============>...............] - ETA: 0s - loss: 0.3037
 85/145 [================>.............] - ETA: 0s - loss: 0.3029
 97/145 [===================>..........] - ETA: 0s - loss: 0.3011
108/145 [=====================>........] - ETA: 0s - loss: 0.3014
119/145 [=======================>......] - ETA: 0s - loss: 0.3023
130/145 [=========================>....] - ETA: 0s - loss: 0.2992
141/145 [============================>.] - ETA: 0s - loss: 0.2982
145/145 [==============================] - 1s 5ms/step - loss: 0.2986

145/145 [==============================] - 1s 6ms/step - loss: 0.2986 - val_loss: 0.0562
Epoch 19/20

  1/145 [..............................] - ETA: 1s - loss: 0.2322
  9/145 [>.............................] - ETA: 0s - loss: 0.2712
 20/145 [===>..........................] - ETA: 0s - loss: 0.2604
 31/145 [=====>........................] - ETA: 0s - loss: 0.2721
 42/145 [=======>......................] - ETA: 0s - loss: 0.2718
 53/145 [=========>....................] - ETA: 0s - loss: 0.2729
 65/145 [============>.................] - ETA: 0s - loss: 0.2858
 77/145 [==============>...............] - ETA: 0s - loss: 0.2884
 89/145 [=================>............] - ETA: 0s - loss: 0.2912
100/145 [===================>..........] - ETA: 0s - loss: 0.2871
112/145 [======================>.......] - ETA: 0s - loss: 0.2887
123/145 [========================>.....] - ETA: 0s - loss: 0.2868
134/145 [==========================>...] - ETA: 0s - loss: 0.2868
145/145 [==============================] - 1s 5ms/step - loss: 0.2860

145/145 [==============================] - 1s 6ms/step - loss: 0.2860 - val_loss: 0.0389
Epoch 20/20

  1/145 [..............................] - ETA: 0s - loss: 0.1757
 11/145 [=>............................] - ETA: 0s - loss: 0.2787
 22/145 [===>..........................] - ETA: 0s - loss: 0.2725
 33/145 [=====>........................] - ETA: 0s - loss: 0.2725
 42/145 [=======>......................] - ETA: 0s - loss: 0.2681
 53/145 [=========>....................] - ETA: 0s - loss: 0.2618
 59/145 [===========>..................] - ETA: 0s - loss: 0.2600
 69/145 [=============>................] - ETA: 0s - loss: 0.2630
 80/145 [===============>..............] - ETA: 0s - loss: 0.2599
 91/145 [=================>............] - ETA: 0s - loss: 0.2597
102/145 [====================>.........] - ETA: 0s - loss: 0.2621
113/145 [======================>.......] - ETA: 0s - loss: 0.2597
124/145 [========================>.....] - ETA: 0s - loss: 0.2573
135/145 [==========================>...] - ETA: 0s - loss: 0.2576
145/145 [==============================] - 1s 5ms/step - loss: 0.2582

145/145 [==============================] - 1s 6ms/step - loss: 0.2582 - val_loss: 0.0932
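
Validation loss bottoms out around epoch 16 (0.0277) and drifts back up by epoch 20 (0.0932), a mild sign of overfitting. One standard guard, not used in this run, is an early-stopping callback; a minimal sketch, assuming the fit call uses a validation split as the val_loss lines suggest:

model %>% fit(
  as.matrix(carbonTrainingFinal),
  carbonTrainingLabels,
  epochs = 20,
  validation_split = 0.2,   # assumed; whatever produced the val_loss above
  callbacks = list(
    callback_early_stopping(monitor = "val_loss", patience = 3,
                            restore_best_weights = TRUE)
  )
)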
kerasPrediction<-model %>% predict(as.matrix(carbonTestingFinal))
63/63 [==============================] - 0s 2ms/step
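
predict() returns a matrix of predictions on the same scale as the training labels; the metric values below sit far below the scale of the raw CarbonEmission values, which indicates the labels were normalized earlier in the pipeline. A sketch of mapping predictions back to the original units, assuming min-max scaling and hypothetical stored constants labelMin and labelMax:

# labelMin/labelMax are hypothetical names for the constants used when scaling
denormalize <- function(z, lo, hi) z * (hi - lo) + lo
# predictedEmission <- denormalize(kerasPrediction, labelMin, labelMax)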
rmse <- function(x, y) {
  # root-mean-square error: square the element-wise differences before averaging
  sqrt(mean((x - y)^2))
}

rmse(kerasPrediction,carbonTestLabels)
[1] 0.2110004
MAE(kerasPrediction,carbonTestLabels)
[1] 0.2486037
# R-squared = 1 - SSE/SST
rsquared <- 1 - sum((kerasPrediction - carbonTestLabels)^2) / sum((carbonTestLabels - mean(carbonTestLabels))^2)
rsquared
[1] 0.526974
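
As a quick sanity check of the three formulas, a toy example (the MAE() call above is assumed to come from a loaded package such as caret; here the formula is written out directly):

pred   <- c(0.2, 0.5, 0.9)
actual <- c(0.1, 0.6, 1.0)
sqrt(mean((pred - actual)^2))                               # RMSE: 0.1
mean(abs(pred - actual))                                    # MAE: 0.1
1 - sum((pred - actual)^2) / sum((actual - mean(actual))^2) # R-squared: ~0.93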
library(tfruns)
runs<-tuning_run(
  "carbonEmission.R",
  flags=list(
    learning_rate=c(0.1,0.5,0.01,0.001),
    nodes=c(8,16,32,64,128),
    batch_size=c(16,32,64,128),
    dropout=c(0.1,0.2,0.3,0.4,0.5),
    activation=c("relu")
  ),sample=0.05
)
400 total combinations of flags 
(sampled to 20 combinations)
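
The source of each run is echoed only in truncated form below, so here is a minimal sketch of roughly what carbonEmission.R contains, reconstructed from those echoes; the single-unit output layer, the flag defaults for learning_rate and dropout, and the validation split are assumptions:

library(keras)
library(tfruns)

FLAGS <- flags(
  flag_numeric("nodes", 32),
  flag_numeric("batch_size", 32),
  flag_string("activation", "relu"),
  flag_numeric("learning_rate", 0.01),   # default is an assumption
  flag_numeric("dropout", 0.2)           # default is an assumption
)

model <- keras_model_sequential() %>%
  layer_dense(units = FLAGS$nodes, activation = FLAGS$activation,
              input_shape = dim(carbonTrainingFinal)[2]) %>%
  layer_dropout(rate = FLAGS$dropout) %>%
  layer_dense(units = 1)                 # regression output (assumed)

model %>% compile(
  loss = "mse",
  optimizer = optimizer_adam(learning_rate = FLAGS$learning_rate)
)

model %>% fit(
  as.matrix(carbonTrainingFinal),
  carbonTrainingLabels,
  batch_size = FLAGS$batch_size,
  epochs = 20,
  validation_split = 0.2                 # assumed
)

After the 20 sampled runs complete, ls_runs(order = metric_val_loss, decreasing = FALSE) should rank them by validation loss.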
Training run 1/20 (flags = list(0.5, 128, 128, 0.3, "relu")) 
Using run directory runs/2024-05-06T14-54-50Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.
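
The lr deprecation warning repeated in every run points at the renamed argument; the compile step in the script could be updated to:

model %>% compile(
  loss = "mse",
  optimizer = optimizer_adam(learning_rate = FLAGS$learning_rate)  # not lr =
)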

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0047s vs `on_train_batch_end` time: 0.0102s). Check your callbacks.
57/57 - 4s - loss: 11.6138 - val_loss: 0.4636 - 4s/epoch - 62ms/step
Epoch 2/20
57/57 - 0s - loss: 2.8228 - val_loss: 0.4192 - 429ms/epoch - 8ms/step
Epoch 3/20
57/57 - 1s - loss: 1.9487 - val_loss: 0.5122 - 965ms/epoch - 17ms/step
Epoch 4/20
57/57 - 1s - loss: 1.4302 - val_loss: 0.3564 - 707ms/epoch - 12ms/step
Epoch 5/20
57/57 - 0s - loss: 1.0963 - val_loss: 0.3395 - 419ms/epoch - 7ms/step
Epoch 6/20
57/57 - 1s - loss: 0.8655 - val_loss: 0.3819 - 699ms/epoch - 12ms/step
Epoch 7/20
57/57 - 1s - loss: 0.6999 - val_loss: 0.2920 - 1s/epoch - 20ms/step
Epoch 8/20
57/57 - 1s - loss: 0.5871 - val_loss: 0.5491 - 704ms/epoch - 12ms/step
Epoch 9/20
57/57 - 0s - loss: 0.5172 - val_loss: 0.4968 - 421ms/epoch - 7ms/step
Epoch 10/20
57/57 - 1s - loss: 0.4489 - val_loss: 0.2608 - 659ms/epoch - 12ms/step
Epoch 11/20
57/57 - 1s - loss: 0.4264 - val_loss: 0.6395 - 698ms/epoch - 12ms/step
Epoch 12/20
57/57 - 1s - loss: 0.3757 - val_loss: 0.5813 - 501ms/epoch - 9ms/step
Epoch 13/20
57/57 - 1s - loss: 0.3551 - val_loss: 0.7431 - 691ms/epoch - 12ms/step
Epoch 14/20
57/57 - 0s - loss: 0.3338 - val_loss: 0.6908 - 445ms/epoch - 8ms/step
Epoch 15/20
57/57 - 1s - loss: 0.3122 - val_loss: 0.6215 - 660ms/epoch - 12ms/step
Epoch 16/20
57/57 - 1s - loss: 0.3136 - val_loss: 0.7349 - 957ms/epoch - 17ms/step
Epoch 17/20
57/57 - 1s - loss: 0.2970 - val_loss: 0.8001 - 908ms/epoch - 16ms/step
Epoch 18/20
57/57 - 0s - loss: 0.2884 - val_loss: 0.9459 - 428ms/epoch - 8ms/step
Epoch 19/20
57/57 - 1s - loss: 0.3036 - val_loss: 0.8340 - 694ms/epoch - 12ms/step
Epoch 20/20
57/57 - 0s - loss: 0.3165 - val_loss: 0.5513 - 455ms/epoch - 8ms/step

Run completed: runs/2024-05-06T14-54-50Z

Training run 2/20 (flags = list(0.5, 8, 128, 0.5, "relu")) 
Using run directory runs/2024-05-06T14-55-07Z

Epoch 1/20
57/57 - 4s - loss: 58.2908 - val_loss: 44.7017 - 4s/epoch - 67ms/step
Epoch 2/20
57/57 - 0s - loss: 44.4467 - val_loss: 32.0859 - 434ms/epoch - 8ms/step
Epoch 3/20
57/57 - 1s - loss: 34.2047 - val_loss: 25.5460 - 969ms/epoch - 17ms/step
Epoch 4/20
57/57 - 1s - loss: 29.2576 - val_loss: 24.3943 - 697ms/epoch - 12ms/step
Epoch 5/20
57/57 - 0s - loss: 26.4547 - val_loss: 24.6113 - 410ms/epoch - 7ms/step
Epoch 6/20
57/57 - 1s - loss: 24.1613 - val_loss: 24.0585 - 696ms/epoch - 12ms/step
Epoch 7/20
57/57 - 1s - loss: 21.2079 - val_loss: 25.2184 - 844ms/epoch - 15ms/step
Epoch 8/20
57/57 - 0s - loss: 18.4006 - val_loss: 28.0411 - 422ms/epoch - 7ms/step
Epoch 9/20
57/57 - 1s - loss: 15.9729 - val_loss: 30.0195 - 912ms/epoch - 16ms/step
Epoch 10/20
57/57 - 0s - loss: 14.3609 - val_loss: 31.8455 - 438ms/epoch - 8ms/step
Epoch 11/20
57/57 - 0s - loss: 13.1000 - val_loss: 32.6619 - 414ms/epoch - 7ms/step
Epoch 12/20
57/57 - 1s - loss: 12.2610 - val_loss: 33.3808 - 931ms/epoch - 16ms/step
Epoch 13/20
57/57 - 1s - loss: 11.6125 - val_loss: 34.1868 - 688ms/epoch - 12ms/step
Epoch 14/20
57/57 - 0s - loss: 11.0342 - val_loss: 33.9786 - 424ms/epoch - 7ms/step
Epoch 15/20
57/57 - 1s - loss: 10.6773 - val_loss: 34.4642 - 710ms/epoch - 12ms/step
Epoch 16/20
57/57 - 0s - loss: 10.2095 - val_loss: 34.4586 - 422ms/epoch - 7ms/step
Epoch 17/20
57/57 - 1s - loss: 9.8543 - val_loss: 34.6746 - 699ms/epoch - 12ms/step
Epoch 18/20
57/57 - 1s - loss: 9.6432 - val_loss: 34.5086 - 833ms/epoch - 15ms/step
Epoch 19/20
57/57 - 0s - loss: 9.3112 - val_loss: 34.7509 - 433ms/epoch - 8ms/step
Epoch 20/20
57/57 - 1s - loss: 9.0224 - val_loss: 34.3791 - 698ms/epoch - 12ms/step

Run completed: runs/2024-05-06T14-55-07Z

Training run 3/20 (flags = list(0.5, 16, 16, 0.1, "relu")) 
Using run directory runs/2024-05-06T14-55-23Z

Epoch 1/20
451/451 - 6s - loss: 7.2410 - val_loss: 1.0509 - 6s/epoch - 14ms/step
Epoch 2/20
451/451 - 3s - loss: 0.9180 - val_loss: 2.2882 - 3s/epoch - 8ms/step
Epoch 3/20
451/451 - 3s - loss: 0.2663 - val_loss: 3.0453 - 3s/epoch - 7ms/step
Epoch 4/20
451/451 - 3s - loss: 0.1253 - val_loss: 3.1514 - 3s/epoch - 7ms/step
Epoch 5/20
451/451 - 3s - loss: 0.0965 - val_loss: 4.3244 - 3s/epoch - 7ms/step
Epoch 6/20
451/451 - 3s - loss: 0.0802 - val_loss: 3.3610 - 3s/epoch - 7ms/step
Epoch 7/20
451/451 - 3s - loss: 0.0667 - val_loss: 3.5523 - 3s/epoch - 7ms/step
Epoch 8/20
451/451 - 3s - loss: 0.0601 - val_loss: 3.6344 - 3s/epoch - 7ms/step
Epoch 9/20
451/451 - 3s - loss: 0.0521 - val_loss: 3.9413 - 3s/epoch - 7ms/step
Epoch 10/20
451/451 - 3s - loss: 0.0468 - val_loss: 3.2592 - 3s/epoch - 7ms/step
Epoch 11/20
451/451 - 3s - loss: 0.0412 - val_loss: 3.7525 - 3s/epoch - 7ms/step
Epoch 12/20
451/451 - 3s - loss: 0.0387 - val_loss: 4.1755 - 3s/epoch - 7ms/step
Epoch 13/20
451/451 - 3s - loss: 0.0367 - val_loss: 3.7769 - 3s/epoch - 7ms/step
Epoch 14/20
451/451 - 3s - loss: 0.0360 - val_loss: 3.3005 - 3s/epoch - 7ms/step
Epoch 15/20
451/451 - 3s - loss: 0.0368 - val_loss: 3.6954 - 3s/epoch - 7ms/step
Epoch 16/20
451/451 - 3s - loss: 0.0402 - val_loss: 3.3595 - 3s/epoch - 6ms/step
Epoch 17/20
451/451 - 3s - loss: 0.0483 - val_loss: 3.6650 - 3s/epoch - 6ms/step
Epoch 18/20
451/451 - 3s - loss: 0.0462 - val_loss: 3.5981 - 3s/epoch - 6ms/step
Epoch 19/20
451/451 - 3s - loss: 0.0549 - val_loss: 3.5653 - 3s/epoch - 6ms/step
Epoch 20/20
451/451 - 3s - loss: 0.0733 - val_loss: 3.1187 - 3s/epoch - 6ms/step

Run completed: runs/2024-05-06T14-55-23Z

Training run 4/20 (flags = list(0.001, 32, 16, 0.3, "relu")) 
Using run directory runs/2024-05-06T14-56-27Z

Epoch 1/20
451/451 - 4s - loss: 8.5746 - val_loss: 1.1113 - 4s/epoch - 8ms/step
Epoch 2/20
451/451 - 3s - loss: 1.4460 - val_loss: 1.0462 - 3s/epoch - 7ms/step
Epoch 3/20
451/451 - 3s - loss: 0.9457 - val_loss: 1.1352 - 3s/epoch - 7ms/step
Epoch 4/20
451/451 - 3s - loss: 0.5666 - val_loss: 1.3286 - 3s/epoch - 6ms/step
Epoch 5/20
451/451 - 3s - loss: 0.3588 - val_loss: 1.2885 - 3s/epoch - 6ms/step
Epoch 6/20
451/451 - 3s - loss: 0.2648 - val_loss: 1.2140 - 3s/epoch - 6ms/step
Epoch 7/20
451/451 - 3s - loss: 0.1982 - val_loss: 1.0452 - 3s/epoch - 6ms/step
Epoch 8/20
451/451 - 3s - loss: 0.1835 - val_loss: 0.7532 - 3s/epoch - 6ms/step
Epoch 9/20
451/451 - 3s - loss: 0.1912 - val_loss: 1.4421 - 3s/epoch - 7ms/step
Epoch 10/20
451/451 - 3s - loss: 0.3299 - val_loss: 1.2238 - 3s/epoch - 6ms/step
Epoch 11/20
451/451 - 3s - loss: 0.5239 - val_loss: 1.2345 - 3s/epoch - 6ms/step
Epoch 12/20
451/451 - 3s - loss: 0.9646 - val_loss: 1.5795 - 3s/epoch - 6ms/step
Epoch 13/20
451/451 - 3s - loss: 1.0706 - val_loss: 1.3688 - 3s/epoch - 6ms/step
Epoch 14/20
451/451 - 3s - loss: 1.5272 - val_loss: 1.5513 - 3s/epoch - 6ms/step
Epoch 15/20
451/451 - 3s - loss: 1.3189 - val_loss: 4.9670 - 3s/epoch - 6ms/step
Epoch 16/20
451/451 - 3s - loss: 0.9311 - val_loss: 6.6251 - 3s/epoch - 6ms/step
Epoch 17/20
451/451 - 3s - loss: 1.0078 - val_loss: 11.9357 - 3s/epoch - 6ms/step
Epoch 18/20
451/451 - 3s - loss: 1.7541 - val_loss: 11.9248 - 3s/epoch - 6ms/step
Epoch 19/20
451/451 - 3s - loss: 1.6260 - val_loss: 12.3699 - 3s/epoch - 6ms/step
Epoch 20/20
451/451 - 3s - loss: 1.9291 - val_loss: 20.7996 - 3s/epoch - 6ms/step

Run completed: runs/2024-05-06T14-56-27Z

Training run 5/20 (flags = list(0.1, 32, 128, 0.3, "relu")) 
Using run directory runs/2024-05-06T14-57-29Z

Epoch 1/20
57/57 - 4s - loss: 27.3645 - val_loss: 3.1691 - 4s/epoch - 67ms/step
Epoch 2/20
57/57 - 0s - loss: 9.7090 - val_loss: 2.9526 - 431ms/epoch - 8ms/step
Epoch 3/20
57/57 - 1s - loss: 7.3945 - val_loss: 3.3290 - 862ms/epoch - 15ms/step
Epoch 4/20
57/57 - 1s - loss: 6.3382 - val_loss: 3.0386 - 671ms/epoch - 12ms/step
Epoch 5/20
57/57 - 0s - loss: 5.4127 - val_loss: 2.7711 - 423ms/epoch - 7ms/step
Epoch 6/20
57/57 - 1s - loss: 4.6305 - val_loss: 2.7949 - 657ms/epoch - 12ms/step
Epoch 7/20
57/57 - 0s - loss: 3.7744 - val_loss: 2.5571 - 421ms/epoch - 7ms/step
Epoch 8/20
57/57 - 1s - loss: 3.1616 - val_loss: 2.8495 - 826ms/epoch - 14ms/step
Epoch 9/20
57/57 - 1s - loss: 2.7162 - val_loss: 3.3917 - 764ms/epoch - 13ms/step
Epoch 10/20
57/57 - 0s - loss: 2.4613 - val_loss: 3.4641 - 421ms/epoch - 7ms/step
Epoch 11/20
57/57 - 1s - loss: 2.1948 - val_loss: 4.1984 - 662ms/epoch - 12ms/step
Epoch 12/20
57/57 - 0s - loss: 1.8854 - val_loss: 3.9335 - 417ms/epoch - 7ms/step
Epoch 13/20
57/57 - 1s - loss: 1.6623 - val_loss: 4.9280 - 662ms/epoch - 12ms/step
Epoch 14/20
57/57 - 0s - loss: 1.5136 - val_loss: 5.0246 - 421ms/epoch - 7ms/step
Epoch 15/20
57/57 - 1s - loss: 1.4015 - val_loss: 5.3457 - 621ms/epoch - 11ms/step
Epoch 16/20
57/57 - 0s - loss: 1.2787 - val_loss: 5.9332 - 420ms/epoch - 7ms/step
Epoch 17/20
57/57 - 0s - loss: 1.1320 - val_loss: 5.2903 - 418ms/epoch - 7ms/step
Epoch 18/20
57/57 - 1s - loss: 1.0409 - val_loss: 5.9916 - 972ms/epoch - 17ms/step
Epoch 19/20
57/57 - 0s - loss: 0.9905 - val_loss: 5.5906 - 451ms/epoch - 8ms/step
Epoch 20/20
57/57 - 1s - loss: 0.8973 - val_loss: 5.6969 - 634ms/epoch - 11ms/step

Run completed: runs/2024-05-06T14-57-29Z

Training run 6/20 (flags = list(0.1, 8, 128, 0.5, "relu")) 
Using run directory runs/2024-05-06T14-57-44Z

Epoch 1/20
57/57 - 4s - loss: 54.2360 - val_loss: 39.9696 - 4s/epoch - 63ms/step
Epoch 2/20
57/57 - 0s - loss: 42.4228 - val_loss: 29.1418 - 440ms/epoch - 8ms/step
Epoch 3/20
57/57 - 1s - loss: 35.7827 - val_loss: 24.1368 - 823ms/epoch - 14ms/step
Epoch 4/20
57/57 - 0s - loss: 31.3284 - val_loss: 21.2503 - 439ms/epoch - 8ms/step
Epoch 5/20
57/57 - 1s - loss: 28.3253 - val_loss: 20.8096 - 613ms/epoch - 11ms/step
Epoch 6/20
57/57 - 1s - loss: 25.3994 - val_loss: 20.4552 - 642ms/epoch - 11ms/step
Epoch 7/20
57/57 - 0s - loss: 23.2297 - val_loss: 21.8989 - 422ms/epoch - 7ms/step
Epoch 8/20
57/57 - 1s - loss: 21.2513 - val_loss: 22.9363 - 1s/epoch - 18ms/step
Epoch 9/20
57/57 - 0s - loss: 20.3645 - val_loss: 24.5281 - 415ms/epoch - 7ms/step
Epoch 10/20
57/57 - 0s - loss: 19.6282 - val_loss: 24.2801 - 413ms/epoch - 7ms/step
Epoch 11/20
57/57 - 1s - loss: 18.5731 - val_loss: 24.0698 - 643ms/epoch - 11ms/step
Epoch 12/20
57/57 - 0s - loss: 17.1591 - val_loss: 23.1534 - 417ms/epoch - 7ms/step
Epoch 13/20
57/57 - 1s - loss: 16.1036 - val_loss: 22.8076 - 839ms/epoch - 15ms/step
Epoch 14/20
57/57 - 0s - loss: 15.0624 - val_loss: 22.3049 - 416ms/epoch - 7ms/step
Epoch 15/20
57/57 - 1s - loss: 14.0257 - val_loss: 21.9625 - 617ms/epoch - 11ms/step
Epoch 16/20
57/57 - 0s - loss: 13.3588 - val_loss: 21.1181 - 409ms/epoch - 7ms/step
Epoch 17/20
57/57 - 1s - loss: 12.5951 - val_loss: 20.8273 - 624ms/epoch - 11ms/step
Epoch 18/20
57/57 - 1s - loss: 12.1326 - val_loss: 19.8063 - 835ms/epoch - 15ms/step
Epoch 19/20
57/57 - 0s - loss: 11.6216 - val_loss: 19.3653 - 455ms/epoch - 8ms/step
Epoch 20/20
57/57 - 1s - loss: 11.2639 - val_loss: 19.1199 - 662ms/epoch - 12ms/step

Run completed: runs/2024-05-06T14-57-44Z

Training run 7/20 (flags = list(0.001, 8, 64, 0.2, "relu")) 
Using run directory runs/2024-05-06T14-57-59Z

Epoch 1/20
113/113 - 4s - loss: 27.8824 - val_loss: 10.0069 - 4s/epoch - 33ms/step
Epoch 2/20
113/113 - 1s - loss: 14.4011 - val_loss: 7.1101 - 1s/epoch - 11ms/step
Epoch 3/20
113/113 - 1s - loss: 10.9057 - val_loss: 6.6431 - 933ms/epoch - 8ms/step
Epoch 4/20
113/113 - 1s - loss: 7.8081 - val_loss: 7.3311 - 944ms/epoch - 8ms/step
Epoch 5/20
113/113 - 1s - loss: 4.8643 - val_loss: 8.3484 - 1s/epoch - 10ms/step
Epoch 6/20
113/113 - 1s - loss: 3.5000 - val_loss: 8.6690 - 1s/epoch - 10ms/step
Epoch 7/20
113/113 - 1s - loss: 2.9008 - val_loss: 8.8686 - 706ms/epoch - 6ms/step
Epoch 8/20
113/113 - 1s - loss: 2.5954 - val_loss: 8.8508 - 930ms/epoch - 8ms/step
Epoch 9/20
113/113 - 1s - loss: 2.3520 - val_loss: 8.8799 - 922ms/epoch - 8ms/step
Epoch 10/20
113/113 - 1s - loss: 2.2080 - val_loss: 9.0141 - 927ms/epoch - 8ms/step
Epoch 11/20
113/113 - 1s - loss: 2.0546 - val_loss: 8.7873 - 1s/epoch - 10ms/step
Epoch 12/20
113/113 - 1s - loss: 1.8869 - val_loss: 9.1333 - 1s/epoch - 10ms/step
Epoch 13/20
113/113 - 1s - loss: 1.7640 - val_loss: 9.3086 - 950ms/epoch - 8ms/step
Epoch 14/20
113/113 - 1s - loss: 1.6231 - val_loss: 9.0543 - 935ms/epoch - 8ms/step
Epoch 15/20
113/113 - 1s - loss: 1.5163 - val_loss: 9.3269 - 939ms/epoch - 8ms/step
Epoch 16/20
113/113 - 1s - loss: 1.4586 - val_loss: 9.2640 - 933ms/epoch - 8ms/step
Epoch 17/20
113/113 - 1s - loss: 1.3899 - val_loss: 9.9020 - 936ms/epoch - 8ms/step
Epoch 18/20
113/113 - 1s - loss: 1.2809 - val_loss: 9.8456 - 891ms/epoch - 8ms/step
Epoch 19/20
113/113 - 1s - loss: 1.2347 - val_loss: 9.3953 - 703ms/epoch - 6ms/step
Epoch 20/20
113/113 - 1s - loss: 1.2234 - val_loss: 9.6033 - 888ms/epoch - 8ms/step

Run completed: runs/2024-05-06T14-57-59Z

Training run 8/20 (flags = list(0.5, 32, 32, 0.3, "relu")) 
Using run directory runs/2024-05-06T14-58-22Z

Epoch 1/20
226/226 - 4s - loss: 14.1427 - val_loss: 1.4044 - 4s/epoch - 18ms/step
Epoch 2/20
226/226 - 2s - loss: 3.3903 - val_loss: 0.7778 - 2s/epoch - 9ms/step
Epoch 3/20
226/226 - 2s - loss: 1.4087 - val_loss: 1.1622 - 2s/epoch - 9ms/step
Epoch 4/20
226/226 - 1s - loss: 0.8784 - val_loss: 1.3062 - 1s/epoch - 7ms/step
Epoch 5/20
226/226 - 1s - loss: 0.6745 - val_loss: 1.4484 - 1s/epoch - 7ms/step
Epoch 6/20
226/226 - 2s - loss: 0.5957 - val_loss: 1.7683 - 2s/epoch - 7ms/step
Epoch 7/20
226/226 - 1s - loss: 0.5664 - val_loss: 2.2544 - 1s/epoch - 7ms/step
Epoch 8/20
226/226 - 2s - loss: 0.5056 - val_loss: 3.0286 - 2s/epoch - 8ms/step
Epoch 9/20
226/226 - 2s - loss: 0.4943 - val_loss: 3.6171 - 2s/epoch - 7ms/step
Epoch 10/20
226/226 - 2s - loss: 0.4270 - val_loss: 4.5025 - 2s/epoch - 8ms/step
Epoch 11/20
226/226 - 2s - loss: 0.3557 - val_loss: 6.1073 - 2s/epoch - 8ms/step
Epoch 12/20
226/226 - 2s - loss: 0.2998 - val_loss: 6.1265 - 2s/epoch - 7ms/step
Epoch 13/20
226/226 - 2s - loss: 0.2564 - val_loss: 5.8923 - 2s/epoch - 8ms/step
Epoch 14/20
226/226 - 2s - loss: 0.2483 - val_loss: 7.0588 - 2s/epoch - 8ms/step
Epoch 15/20
226/226 - 1s - loss: 0.2262 - val_loss: 7.1452 - 1s/epoch - 7ms/step
Epoch 16/20
226/226 - 2s - loss: 0.2271 - val_loss: 6.2520 - 2s/epoch - 7ms/step
Epoch 17/20
226/226 - 2s - loss: 0.2006 - val_loss: 6.2938 - 2s/epoch - 7ms/step
Epoch 18/20
226/226 - 2s - loss: 0.1824 - val_loss: 6.5048 - 2s/epoch - 7ms/step
Epoch 19/20
226/226 - 1s - loss: 0.1703 - val_loss: 5.4294 - 1s/epoch - 7ms/step
Epoch 20/20
226/226 - 2s - loss: 0.2027 - val_loss: 4.8250 - 2s/epoch - 8ms/step

Run completed: runs/2024-05-06T14-58-22Z

Training run 9/20 (flags = list(0.5, 64, 64, 0.5, "relu")) 
Using run directory runs/2024-05-06T14-58-58Z

Epoch 1/20
113/113 - 4s - loss: 16.3675 - val_loss: 4.4610 - 4s/epoch - 37ms/step
Epoch 2/20
113/113 - 1s - loss: 4.3639 - val_loss: 2.9391 - 1s/epoch - 9ms/step
Epoch 3/20
113/113 - 1s - loss: 2.3112 - val_loss: 2.8172 - 844ms/epoch - 7ms/step
Epoch 4/20
113/113 - 1s - loss: 1.7284 - val_loss: 3.3120 - 714ms/epoch - 6ms/step
Epoch 5/20
113/113 - 1s - loss: 1.3500 - val_loss: 2.7280 - 1s/epoch - 12ms/step
Epoch 6/20
113/113 - 1s - loss: 1.2119 - val_loss: 3.1502 - 1s/epoch - 10ms/step
Epoch 7/20
113/113 - 1s - loss: 1.0716 - val_loss: 3.1872 - 1s/epoch - 10ms/step
Epoch 8/20
113/113 - 1s - loss: 0.9595 - val_loss: 3.0978 - 931ms/epoch - 8ms/step
Epoch 9/20
113/113 - 1s - loss: 0.8513 - val_loss: 2.8730 - 937ms/epoch - 8ms/step
Epoch 10/20
113/113 - 1s - loss: 0.8251 - val_loss: 2.8990 - 922ms/epoch - 8ms/step
Epoch 11/20
113/113 - 1s - loss: 0.7660 - val_loss: 2.9483 - 1s/epoch - 12ms/step
Epoch 12/20
113/113 - 1s - loss: 0.6602 - val_loss: 2.7560 - 940ms/epoch - 8ms/step
Epoch 13/20
113/113 - 1s - loss: 0.5844 - val_loss: 2.8620 - 1s/epoch - 10ms/step
Epoch 14/20
113/113 - 1s - loss: 0.5224 - val_loss: 3.1427 - 1s/epoch - 10ms/step
Epoch 15/20
113/113 - 1s - loss: 0.5022 - val_loss: 3.1201 - 696ms/epoch - 6ms/step
Epoch 16/20
113/113 - 1s - loss: 0.4721 - val_loss: 3.0309 - 946ms/epoch - 8ms/step
Epoch 17/20
113/113 - 1s - loss: 0.4312 - val_loss: 3.3700 - 944ms/epoch - 8ms/step
Epoch 18/20
113/113 - 1s - loss: 0.3745 - val_loss: 3.1685 - 930ms/epoch - 8ms/step
Epoch 19/20
113/113 - 1s - loss: 0.3299 - val_loss: 3.6113 - 1s/epoch - 10ms/step
Epoch 20/20
113/113 - 1s - loss: 0.3444 - val_loss: 3.0329 - 1s/epoch - 10ms/step

Run completed: runs/2024-05-06T14-58-58Z

Training run 10/20 (flags = list(0.001, 32, 16, 0.1, "relu")) 
Using run directory runs/2024-05-06T14-59-22Z

Epoch 1/20
451/451 - 6s - loss: 5.4132 - val_loss: 2.2404 - 6s/epoch - 13ms/step
Epoch 2/20
451/451 - 3s - loss: 0.2787 - val_loss: 2.8976 - 3s/epoch - 7ms/step
Epoch 3/20
451/451 - 3s - loss: 0.1131 - val_loss: 3.4428 - 3s/epoch - 7ms/step
Epoch 4/20
451/451 - 3s - loss: 0.0763 - val_loss: 3.0443 - 3s/epoch - 6ms/step
Epoch 5/20
451/451 - 3s - loss: 0.0570 - val_loss: 3.2611 - 3s/epoch - 6ms/step
Epoch 6/20
451/451 - 3s - loss: 0.0493 - val_loss: 3.5065 - 3s/epoch - 6ms/step
Epoch 7/20
451/451 - 3s - loss: 0.0466 - val_loss: 3.2422 - 3s/epoch - 6ms/step
Epoch 8/20
451/451 - 3s - loss: 0.0528 - val_loss: 3.9538 - 3s/epoch - 6ms/step
Epoch 9/20
451/451 - 3s - loss: 0.1137 - val_loss: 4.0139 - 3s/epoch - 7ms/step
Epoch 10/20
451/451 - 3s - loss: 0.3051 - val_loss: 7.7234 - 3s/epoch - 6ms/step
Epoch 11/20
451/451 - 3s - loss: 0.6024 - val_loss: 4.9395 - 3s/epoch - 6ms/step
Epoch 12/20
451/451 - 3s - loss: 0.4960 - val_loss: 5.7518 - 3s/epoch - 6ms/step
Epoch 13/20
451/451 - 3s - loss: 0.6740 - val_loss: 6.0071 - 3s/epoch - 6ms/step
Epoch 14/20
451/451 - 3s - loss: 0.8054 - val_loss: 5.9459 - 3s/epoch - 6ms/step
Epoch 15/20
451/451 - 3s - loss: 0.8653 - val_loss: 7.6197 - 3s/epoch - 6ms/step
Epoch 16/20
451/451 - 3s - loss: 0.9881 - val_loss: 7.2715 - 3s/epoch - 7ms/step
Epoch 17/20
451/451 - 3s - loss: 1.7702 - val_loss: 4.9933 - 3s/epoch - 7ms/step
Epoch 18/20
451/451 - 3s - loss: 0.8956 - val_loss: 7.7228 - 3s/epoch - 7ms/step
Epoch 19/20
451/451 - 3s - loss: 1.1377 - val_loss: 6.0371 - 3s/epoch - 7ms/step
Epoch 20/20
451/451 - 3s - loss: 0.9618 - val_loss: 4.6644 - 3s/epoch - 7ms/step

Run completed: runs/2024-05-06T14-59-22Z

Training run 11/20 (flags = list(0.001, 128, 128, 0.1, "relu")) 
Using run directory runs/2024-05-06T15-00-23Z

Epoch 1/20
57/57 - 4s - loss: 6.7932 - val_loss: 0.3520 - 4s/epoch - 62ms/step
Epoch 2/20
57/57 - 0s - loss: 1.1441 - val_loss: 0.1199 - 433ms/epoch - 8ms/step
Epoch 3/20
57/57 - 1s - loss: 0.9673 - val_loss: 0.0774 - 659ms/epoch - 12ms/step
Epoch 4/20
57/57 - 0s - loss: 0.8415 - val_loss: 0.0567 - 419ms/epoch - 7ms/step
Epoch 5/20
57/57 - 1s - loss: 0.7376 - val_loss: 0.0749 - 646ms/epoch - 11ms/step
Epoch 6/20
57/57 - 0s - loss: 0.6615 - val_loss: 0.0482 - 421ms/epoch - 7ms/step
Epoch 7/20
57/57 - 1s - loss: 0.5997 - val_loss: 0.0830 - 845ms/epoch - 15ms/step
Epoch 8/20
57/57 - 1s - loss: 0.5506 - val_loss: 0.0730 - 653ms/epoch - 11ms/step
Epoch 9/20
57/57 - 0s - loss: 0.4975 - val_loss: 0.1078 - 429ms/epoch - 8ms/step
Epoch 10/20
57/57 - 1s - loss: 0.4809 - val_loss: 0.0800 - 852ms/epoch - 15ms/step
Epoch 11/20
57/57 - 0s - loss: 0.4315 - val_loss: 0.0916 - 435ms/epoch - 8ms/step
Epoch 12/20
57/57 - 1s - loss: 0.4245 - val_loss: 0.0799 - 611ms/epoch - 11ms/step
Epoch 13/20
57/57 - 1s - loss: 0.4424 - val_loss: 0.0902 - 806ms/epoch - 14ms/step
Epoch 14/20
57/57 - 0s - loss: 0.4757 - val_loss: 0.1759 - 422ms/epoch - 7ms/step
Epoch 15/20
57/57 - 1s - loss: 0.5080 - val_loss: 0.0634 - 844ms/epoch - 15ms/step
Epoch 16/20
57/57 - 0s - loss: 0.6966 - val_loss: 0.1159 - 423ms/epoch - 7ms/step
Epoch 17/20
57/57 - 0s - loss: 0.8899 - val_loss: 0.1272 - 420ms/epoch - 7ms/step
Epoch 18/20
57/57 - 1s - loss: 1.2849 - val_loss: 0.7818 - 836ms/epoch - 15ms/step
Epoch 19/20
57/57 - 0s - loss: 1.5460 - val_loss: 0.1634 - 475ms/epoch - 8ms/step
Epoch 20/20
57/57 - 1s - loss: 1.6178 - val_loss: 0.8310 - 595ms/epoch - 10ms/step

Run completed: runs/2024-05-06T15-00-23Z

Training run 12/20 (flags = list(0.1, 16, 32, 0.5, "relu")) 
Using run directory runs/2024-05-06T15-00-38Z

Epoch 1/20
226/226 - 5s - loss: 42.3642 - val_loss: 17.8899 - 5s/epoch - 21ms/step
Epoch 2/20
226/226 - 1s - loss: 16.8195 - val_loss: 16.6061 - 1s/epoch - 7ms/step
Epoch 3/20
226/226 - 2s - loss: 9.7924 - val_loss: 15.2268 - 2s/epoch - 8ms/step
Epoch 4/20
226/226 - 2s - loss: 6.3296 - val_loss: 15.9971 - 2s/epoch - 8ms/step
Epoch 5/20
226/226 - 2s - loss: 4.5965 - val_loss: 15.7332 - 2s/epoch - 8ms/step
Epoch 6/20
226/226 - 2s - loss: 3.5200 - val_loss: 16.1088 - 2s/epoch - 8ms/step
Epoch 7/20
226/226 - 2s - loss: 3.1163 - val_loss: 15.4500 - 2s/epoch - 7ms/step
Epoch 8/20
226/226 - 2s - loss: 2.9552 - val_loss: 16.8626 - 2s/epoch - 8ms/step
Epoch 9/20
226/226 - 2s - loss: 2.9407 - val_loss: 18.6550 - 2s/epoch - 7ms/step
Epoch 10/20
226/226 - 1s - loss: 2.9194 - val_loss: 20.8167 - 1s/epoch - 7ms/step
Epoch 11/20
226/226 - 2s - loss: 2.6893 - val_loss: 21.3925 - 2s/epoch - 7ms/step
Epoch 12/20
226/226 - 1s - loss: 2.3486 - val_loss: 18.9689 - 1s/epoch - 7ms/step
Epoch 13/20
226/226 - 2s - loss: 2.0517 - val_loss: 19.2253 - 2s/epoch - 7ms/step
Epoch 14/20
226/226 - 2s - loss: 1.6478 - val_loss: 20.3939 - 2s/epoch - 8ms/step
Epoch 15/20
226/226 - 2s - loss: 1.4001 - val_loss: 15.6542 - 2s/epoch - 7ms/step
Epoch 16/20
226/226 - 2s - loss: 1.2497 - val_loss: 16.5755 - 2s/epoch - 7ms/step
Epoch 17/20
226/226 - 2s - loss: 0.8850 - val_loss: 16.2725 - 2s/epoch - 9ms/step
Epoch 18/20
226/226 - 2s - loss: 0.7459 - val_loss: 15.4820 - 2s/epoch - 8ms/step
Epoch 19/20
226/226 - 2s - loss: 0.7532 - val_loss: 15.7535 - 2s/epoch - 8ms/step
Epoch 20/20
226/226 - 1s - loss: 0.7799 - val_loss: 15.0892 - 1s/epoch - 7ms/step

Run completed: runs/2024-05-06T15-00-38Z

Training run 13/20 (flags = list(0.001, 16, 128, 0.1, "relu")) 
Using run directory runs/2024-05-06T15-01-16Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:01:17.193063: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0051s vs `on_train_batch_end` time: 0.0104s). Check your callbacks.
2024-05-06 10:01:19.315139: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
57/57 - 3s - loss: 32.8509 - val_loss: 6.3026 - 3s/epoch - 51ms/step
Epoch 2/20
57/57 - 1s - loss: 7.4565 - val_loss: 1.1933 - 1s/epoch - 20ms/step
Epoch 3/20
57/57 - 1s - loss: 5.2598 - val_loss: 0.8431 - 832ms/epoch - 15ms/step
Epoch 4/20
57/57 - 0s - loss: 4.5574 - val_loss: 0.8813 - 415ms/epoch - 7ms/step
Epoch 5/20
57/57 - 0s - loss: 4.1343 - val_loss: 0.7815 - 414ms/epoch - 7ms/step
Epoch 6/20
57/57 - 1s - loss: 3.7398 - val_loss: 0.7278 - 778ms/epoch - 14ms/step
Epoch 7/20
57/57 - 0s - loss: 3.3985 - val_loss: 0.7768 - 432ms/epoch - 8ms/step
Epoch 8/20
57/57 - 1s - loss: 3.1007 - val_loss: 0.7981 - 659ms/epoch - 12ms/step
Epoch 9/20
57/57 - 0s - loss: 2.7546 - val_loss: 0.9694 - 417ms/epoch - 7ms/step
Epoch 10/20
57/57 - 1s - loss: 2.5242 - val_loss: 1.1810 - 855ms/epoch - 15ms/step
Epoch 11/20
57/57 - 0s - loss: 2.3096 - val_loss: 1.2593 - 422ms/epoch - 7ms/step
Epoch 12/20
57/57 - 0s - loss: 2.0929 - val_loss: 1.4915 - 423ms/epoch - 7ms/step
Epoch 13/20
57/57 - 0s - loss: 1.7844 - val_loss: 1.6409 - 416ms/epoch - 7ms/step
Epoch 14/20
57/57 - 1s - loss: 1.5349 - val_loss: 1.7480 - 836ms/epoch - 15ms/step
Epoch 15/20
57/57 - 0s - loss: 1.2141 - val_loss: 2.3716 - 424ms/epoch - 7ms/step
Epoch 16/20
57/57 - 0s - loss: 1.0039 - val_loss: 2.7482 - 422ms/epoch - 7ms/step
Epoch 17/20
57/57 - 0s - loss: 0.7942 - val_loss: 2.4333 - 443ms/epoch - 8ms/step
Epoch 18/20
57/57 - 1s - loss: 0.6808 - val_loss: 3.0976 - 623ms/epoch - 11ms/step
Epoch 19/20
57/57 - 1s - loss: 0.5678 - val_loss: 3.3911 - 832ms/epoch - 15ms/step
Epoch 20/20
57/57 - 0s - loss: 0.5200 - val_loss: 3.8461 - 444ms/epoch - 8ms/step

Run completed: runs/2024-05-06T15-01-16Z

Training run 14/20 (flags = list(0.5, 128, 32, 0.5, "relu")) 
Using run directory runs/2024-05-06T15-01-31Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:01:31.902364: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0044s vs `on_train_batch_end` time: 0.0093s). Check your callbacks.
2024-05-06 10:01:35.249219: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
226/226 - 4s - loss: 6.7435 - val_loss: 2.2936 - 4s/epoch - 18ms/step
Epoch 2/20
226/226 - 2s - loss: 0.8185 - val_loss: 2.3855 - 2s/epoch - 8ms/step
Epoch 3/20
226/226 - 2s - loss: 0.4637 - val_loss: 2.3568 - 2s/epoch - 8ms/step
Epoch 4/20
226/226 - 2s - loss: 0.3194 - val_loss: 2.4118 - 2s/epoch - 9ms/step
Epoch 5/20
226/226 - 2s - loss: 0.2394 - val_loss: 1.9110 - 2s/epoch - 7ms/step
Epoch 6/20
226/226 - 2s - loss: 0.1977 - val_loss: 2.2398 - 2s/epoch - 9ms/step
Epoch 7/20
226/226 - 2s - loss: 0.1703 - val_loss: 1.7173 - 2s/epoch - 9ms/step
Epoch 8/20
226/226 - 2s - loss: 0.1412 - val_loss: 1.7953 - 2s/epoch - 9ms/step
Epoch 9/20
226/226 - 2s - loss: 0.1579 - val_loss: 1.9782 - 2s/epoch - 7ms/step
Epoch 10/20
226/226 - 2s - loss: 0.1898 - val_loss: 2.3954 - 2s/epoch - 7ms/step
Epoch 11/20
226/226 - 2s - loss: 0.3629 - val_loss: 2.2133 - 2s/epoch - 9ms/step
Epoch 12/20
226/226 - 2s - loss: 0.2497 - val_loss: 2.1539 - 2s/epoch - 9ms/step
Epoch 13/20
226/226 - 2s - loss: 0.5479 - val_loss: 1.7509 - 2s/epoch - 9ms/step
Epoch 14/20
226/226 - 2s - loss: 0.7239 - val_loss: 1.6447 - 2s/epoch - 9ms/step
Epoch 15/20
226/226 - 2s - loss: 0.7419 - val_loss: 2.2293 - 2s/epoch - 7ms/step
Epoch 16/20
226/226 - 2s - loss: 0.6668 - val_loss: 5.7126 - 2s/epoch - 7ms/step
Epoch 17/20
226/226 - 2s - loss: 1.0296 - val_loss: 4.6779 - 2s/epoch - 9ms/step
Epoch 18/20
226/226 - 2s - loss: 3.1392 - val_loss: 28.5436 - 2s/epoch - 9ms/step
Epoch 19/20
226/226 - 2s - loss: 1.2982 - val_loss: 35.5697 - 2s/epoch - 9ms/step
Epoch 20/20
226/226 - 2s - loss: 1.7788 - val_loss: 31.6501 - 2s/epoch - 8ms/step

Run completed: runs/2024-05-06T15-01-31Z

Training run 15/20 (flags = list(0.5, 16, 32, 0.2, "relu")) 
Using run directory runs/2024-05-06T15-02-11Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:02:11.399961: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0048s vs `on_train_batch_end` time: 0.0979s). Check your callbacks.
2024-05-06 10:02:14.933991: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
226/226 - 5s - loss: 15.1082 - val_loss: 2.4513 - 5s/epoch - 21ms/step
Epoch 2/20
226/226 - 2s - loss: 4.0246 - val_loss: 2.3613 - 2s/epoch - 7ms/step
Epoch 3/20
226/226 - 1s - loss: 2.3179 - val_loss: 2.7398 - 1s/epoch - 7ms/step
Epoch 4/20
226/226 - 2s - loss: 1.4267 - val_loss: 3.9060 - 2s/epoch - 9ms/step
Epoch 5/20
226/226 - 2s - loss: 0.9689 - val_loss: 4.5371 - 2s/epoch - 7ms/step
Epoch 6/20
226/226 - 2s - loss: 0.6731 - val_loss: 4.8713 - 2s/epoch - 7ms/step
Epoch 7/20
226/226 - 2s - loss: 0.4922 - val_loss: 5.6833 - 2s/epoch - 8ms/step
Epoch 8/20
226/226 - 2s - loss: 0.4046 - val_loss: 6.0810 - 2s/epoch - 8ms/step
Epoch 9/20
226/226 - 2s - loss: 0.3344 - val_loss: 5.9184 - 2s/epoch - 7ms/step
Epoch 10/20
226/226 - 2s - loss: 0.2714 - val_loss: 6.4398 - 2s/epoch - 8ms/step
Epoch 11/20
226/226 - 1s - loss: 0.2485 - val_loss: 6.7110 - 1s/epoch - 7ms/step
Epoch 12/20
226/226 - 2s - loss: 0.2161 - val_loss: 6.0764 - 2s/epoch - 7ms/step
Epoch 13/20
226/226 - 2s - loss: 0.1849 - val_loss: 7.1760 - 2s/epoch - 8ms/step
Epoch 14/20
226/226 - 1s - loss: 0.1611 - val_loss: 6.3526 - 1s/epoch - 6ms/step
Epoch 15/20
226/226 - 1s - loss: 0.1461 - val_loss: 7.2512 - 1s/epoch - 6ms/step
Epoch 16/20
226/226 - 2s - loss: 0.1321 - val_loss: 7.5646 - 2s/epoch - 7ms/step
Epoch 17/20
226/226 - 2s - loss: 0.1162 - val_loss: 6.9163 - 2s/epoch - 7ms/step
Epoch 18/20
226/226 - 2s - loss: 0.1094 - val_loss: 6.8628 - 2s/epoch - 7ms/step
Epoch 19/20
226/226 - 1s - loss: 0.0999 - val_loss: 7.8049 - 1s/epoch - 6ms/step
Epoch 20/20
226/226 - 2s - loss: 0.0961 - val_loss: 6.8945 - 2s/epoch - 7ms/step

Run completed: runs/2024-05-06T15-02-11Z

Training run 16/20 (flags = list(0.001, 8, 32, 0.3, "relu")) 
Using run directory runs/2024-05-06T15-02-47Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:02:47.420338: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0060s vs `on_train_batch_end` time: 0.2036s). Check your callbacks.
2024-05-06 10:02:51.137654: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
226/226 - 5s - loss: 29.1013 - val_loss: 11.0376 - 5s/epoch - 21ms/step
Epoch 2/20
226/226 - 2s - loss: 10.5182 - val_loss: 13.0675 - 2s/epoch - 7ms/step
Epoch 3/20
226/226 - 2s - loss: 5.5168 - val_loss: 18.3696 - 2s/epoch - 8ms/step
Epoch 4/20
226/226 - 2s - loss: 4.0513 - val_loss: 18.9510 - 2s/epoch - 9ms/step
Epoch 5/20
226/226 - 2s - loss: 3.0583 - val_loss: 20.2050 - 2s/epoch - 8ms/step
Epoch 6/20
226/226 - 2s - loss: 2.1942 - val_loss: 21.3651 - 2s/epoch - 7ms/step
Epoch 7/20
226/226 - 2s - loss: 1.6739 - val_loss: 22.8143 - 2s/epoch - 8ms/step
Epoch 8/20
226/226 - 2s - loss: 1.4806 - val_loss: 22.5425 - 2s/epoch - 8ms/step
Epoch 9/20
226/226 - 2s - loss: 1.2700 - val_loss: 23.3295 - 2s/epoch - 7ms/step
Epoch 10/20
226/226 - 1s - loss: 1.2092 - val_loss: 24.7564 - 1s/epoch - 6ms/step
Epoch 11/20
226/226 - 2s - loss: 1.7698 - val_loss: 28.4375 - 2s/epoch - 8ms/step
Epoch 12/20
226/226 - 2s - loss: 4.3547 - val_loss: 21.3643 - 2s/epoch - 7ms/step
Epoch 13/20
226/226 - 2s - loss: 6.2425 - val_loss: 16.0654 - 2s/epoch - 9ms/step
Epoch 14/20
226/226 - 2s - loss: 3.6671 - val_loss: 16.9212 - 2s/epoch - 8ms/step
Epoch 15/20
226/226 - 2s - loss: 3.5503 - val_loss: 12.9283 - 2s/epoch - 8ms/step
Epoch 16/20
226/226 - 2s - loss: 3.5513 - val_loss: 12.0035 - 2s/epoch - 8ms/step
Epoch 17/20
226/226 - 2s - loss: 3.1212 - val_loss: 13.2702 - 2s/epoch - 7ms/step
Epoch 18/20
226/226 - 2s - loss: 2.6468 - val_loss: 13.0565 - 2s/epoch - 7ms/step
Epoch 19/20
226/226 - 2s - loss: 2.4637 - val_loss: 12.0501 - 2s/epoch - 7ms/step
Epoch 20/20
226/226 - 2s - loss: 2.2244 - val_loss: 12.2772 - 2s/epoch - 7ms/step

Run completed: runs/2024-05-06T15-02-47Z

Training run 17/20 (flags = list(0.001, 128, 32, 0.5, "relu")) 
Using run directory runs/2024-05-06T15-03-24Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:03:26.549386: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0054s vs `on_train_batch_end` time: 0.0097s). Check your callbacks.
2024-05-06 10:03:28.721349: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
226/226 - 4s - loss: 7.5747 - val_loss: 3.4870 - 4s/epoch - 17ms/step
Epoch 2/20
226/226 - 2s - loss: 0.8784 - val_loss: 3.1515 - 2s/epoch - 10ms/step
Epoch 3/20
226/226 - 2s - loss: 0.4849 - val_loss: 2.5994 - 2s/epoch - 9ms/step
Epoch 4/20
226/226 - 2s - loss: 0.3130 - val_loss: 2.5727 - 2s/epoch - 8ms/step
Epoch 5/20
226/226 - 2s - loss: 0.2393 - val_loss: 2.9651 - 2s/epoch - 8ms/step
Epoch 6/20
226/226 - 1s - loss: 0.1816 - val_loss: 2.4316 - 1s/epoch - 7ms/step
Epoch 7/20
226/226 - 2s - loss: 0.1522 - val_loss: 2.4291 - 2s/epoch - 9ms/step
Epoch 8/20
226/226 - 2s - loss: 0.1375 - val_loss: 2.3906 - 2s/epoch - 7ms/step
Epoch 9/20
226/226 - 2s - loss: 0.1426 - val_loss: 2.6801 - 2s/epoch - 7ms/step
Epoch 10/20
226/226 - 2s - loss: 0.1421 - val_loss: 2.3931 - 2s/epoch - 8ms/step
Epoch 11/20
226/226 - 2s - loss: 0.1620 - val_loss: 1.9415 - 2s/epoch - 8ms/step
Epoch 12/20
226/226 - 2s - loss: 0.1833 - val_loss: 1.9059 - 2s/epoch - 8ms/step
Epoch 13/20
226/226 - 2s - loss: 0.5284 - val_loss: 4.3917 - 2s/epoch - 8ms/step
Epoch 14/20
226/226 - 2s - loss: 1.4037 - val_loss: 2.5431 - 2s/epoch - 7ms/step
Epoch 15/20
226/226 - 1s - loss: 2.9297 - val_loss: 5.9466 - 1s/epoch - 7ms/step
Epoch 16/20
226/226 - 2s - loss: 1.7710 - val_loss: 26.1538 - 2s/epoch - 8ms/step
Epoch 17/20
226/226 - 1s - loss: 2.7779 - val_loss: 19.3276 - 1s/epoch - 7ms/step
Epoch 18/20
226/226 - 2s - loss: 1.8544 - val_loss: 4.2559 - 2s/epoch - 8ms/step
Epoch 19/20
226/226 - 2s - loss: 9.7696 - val_loss: 23.6507 - 2s/epoch - 8ms/step
Epoch 20/20
226/226 - 1s - loss: 0.5702 - val_loss: 36.0097 - 1s/epoch - 7ms/step

Run completed: runs/2024-05-06T15-03-24Z

Training run 18/20 (flags = list(0.5, 32, 64, 0.3, "relu")) 
Using run directory runs/2024-05-06T15-04-02Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:04:03.283512: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0045s vs `on_train_batch_end` time: 0.0089s). Check your callbacks.
2024-05-06 10:04:06.284323: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
113/113 - 4s - loss: 17.6082 - val_loss: 2.3672 - 4s/epoch - 34ms/step
Epoch 2/20
113/113 - 1s - loss: 6.9668 - val_loss: 1.7528 - 882ms/epoch - 8ms/step
Epoch 3/20
113/113 - 1s - loss: 4.7502 - val_loss: 1.8603 - 711ms/epoch - 6ms/step
Epoch 4/20
113/113 - 1s - loss: 3.1467 - val_loss: 1.7429 - 961ms/epoch - 9ms/step
Epoch 5/20
113/113 - 1s - loss: 2.2308 - val_loss: 1.9816 - 943ms/epoch - 8ms/step
Epoch 6/20
113/113 - 1s - loss: 1.6439 - val_loss: 2.2695 - 936ms/epoch - 8ms/step
Epoch 7/20
113/113 - 1s - loss: 1.1902 - val_loss: 1.7954 - 1s/epoch - 10ms/step
Epoch 8/20
113/113 - 1s - loss: 0.8874 - val_loss: 2.1697 - 707ms/epoch - 6ms/step
Epoch 9/20
113/113 - 1s - loss: 0.7588 - val_loss: 2.3111 - 934ms/epoch - 8ms/step
Epoch 10/20
113/113 - 1s - loss: 0.6286 - val_loss: 2.2440 - 1s/epoch - 12ms/step
Epoch 11/20
113/113 - 1s - loss: 0.5317 - val_loss: 2.3643 - 934ms/epoch - 8ms/step
Epoch 12/20
113/113 - 1s - loss: 0.4632 - val_loss: 2.2526 - 930ms/epoch - 8ms/step
Epoch 13/20
113/113 - 1s - loss: 0.4096 - val_loss: 2.5801 - 954ms/epoch - 8ms/step
Epoch 14/20
113/113 - 1s - loss: 0.3803 - val_loss: 2.1516 - 931ms/epoch - 8ms/step
Epoch 15/20
113/113 - 1s - loss: 0.3494 - val_loss: 2.3517 - 926ms/epoch - 8ms/step
Epoch 16/20
113/113 - 1s - loss: 0.3167 - val_loss: 2.5672 - 933ms/epoch - 8ms/step
Epoch 17/20
113/113 - 1s - loss: 0.3074 - val_loss: 2.8332 - 839ms/epoch - 7ms/step
Epoch 18/20
113/113 - 1s - loss: 0.2822 - val_loss: 2.8748 - 703ms/epoch - 6ms/step
Epoch 19/20
113/113 - 1s - loss: 0.2659 - val_loss: 2.7788 - 947ms/epoch - 8ms/step
Epoch 20/20
113/113 - 1s - loss: 0.2508 - val_loss: 2.8518 - 939ms/epoch - 8ms/step

Run completed: runs/2024-05-06T15-04-02Z

Training run 19/20 (flags = list(0.5, 32, 128, 0.3, "relu")) 
Using run directory runs/2024-05-06T15-04-24Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:04:25.384704: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0049s vs `on_train_batch_end` time: 0.0095s). Check your callbacks.
2024-05-06 10:04:27.757719: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
57/57 - 3s - loss: 22.1191 - val_loss: 4.0862 - 3s/epoch - 55ms/step
Epoch 2/20
57/57 - 1s - loss: 9.2245 - val_loss: 2.9485 - 1s/epoch - 19ms/step
Epoch 3/20
57/57 - 1s - loss: 7.3146 - val_loss: 2.3437 - 861ms/epoch - 15ms/step
Epoch 4/20
57/57 - 0s - loss: 6.3196 - val_loss: 2.3771 - 424ms/epoch - 7ms/step
Epoch 5/20
57/57 - 0s - loss: 5.4482 - val_loss: 2.4674 - 431ms/epoch - 8ms/step
Epoch 6/20
57/57 - 0s - loss: 4.7752 - val_loss: 2.8429 - 449ms/epoch - 8ms/step
Epoch 7/20
57/57 - 0s - loss: 4.0931 - val_loss: 2.9437 - 432ms/epoch - 8ms/step
Epoch 8/20
57/57 - 1s - loss: 3.6478 - val_loss: 3.2997 - 1s/epoch - 19ms/step
Epoch 9/20
57/57 - 1s - loss: 3.0678 - val_loss: 3.6161 - 676ms/epoch - 12ms/step
Epoch 10/20
57/57 - 0s - loss: 2.6597 - val_loss: 3.8673 - 426ms/epoch - 7ms/step
Epoch 11/20
57/57 - 1s - loss: 2.2568 - val_loss: 4.8779 - 613ms/epoch - 11ms/step
Epoch 12/20
57/57 - 0s - loss: 1.9041 - val_loss: 4.4440 - 468ms/epoch - 8ms/step
Epoch 13/20
57/57 - 1s - loss: 1.6503 - val_loss: 5.6607 - 634ms/epoch - 11ms/step
Epoch 14/20
57/57 - 0s - loss: 1.5280 - val_loss: 6.0256 - 428ms/epoch - 8ms/step
Epoch 15/20
57/57 - 1s - loss: 1.3766 - val_loss: 5.7559 - 625ms/epoch - 11ms/step
Epoch 16/20
57/57 - 1s - loss: 1.2884 - val_loss: 6.3102 - 578ms/epoch - 10ms/step
Epoch 17/20
57/57 - 0s - loss: 1.2674 - val_loss: 5.9422 - 445ms/epoch - 8ms/step
Epoch 18/20
57/57 - 1s - loss: 1.1961 - val_loss: 5.8227 - 851ms/epoch - 15ms/step
Epoch 19/20
57/57 - 0s - loss: 1.1268 - val_loss: 6.9040 - 429ms/epoch - 8ms/step
Epoch 20/20
57/57 - 1s - loss: 1.0341 - val_loss: 6.4695 - 682ms/epoch - 12ms/step

Run completed: runs/2024-05-06T15-04-24Z

Training run 20/20 (flags = list(0.01, 64, 16, 0.2, "relu")) 
Using run directory runs/2024-05-06T15-04-40Z

> FLAGS<- flags(
+   flag_numeric("nodes", 32),
+   flag_numeric("batch_size",32),
+   flag_string("activation","relu"),
+   flag_numeric("learning_ra ..." ... [TRUNCATED] 

> model = keras_model_sequential()

> model %>%
+   layer_dense(units = FLAGS$nodes, activation = FLAGS$activation, input_shape = dim(carbonTrainingFinal)[2]) %>%
+   layer_dropout(rate= .... [TRUNCATED] 

> model %>% compile(
+   loss="mse",
+   optimizer=optimizer_adam(lr=FLAGS$learning_rate)
+ )
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.

> model %>%fit(  as.matrix(carbonTrainingFinal),
+                carbonTrainingLabels,
+                batch_size=FLAGS$batch_size,
+                .... [TRUNCATED] 
Epoch 1/20
2024-05-06 10:04:43.116126: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0055s vs `on_train_batch_end` time: 0.0096s). Check your callbacks.
2024-05-06 10:04:45.725073: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
451/451 - 6s - loss: 4.0625 - val_loss: 1.2168 - 6s/epoch - 13ms/step
Epoch 2/20
451/451 - 3s - loss: 0.3220 - val_loss: 1.2219 - 3s/epoch - 7ms/step
Epoch 3/20
451/451 - 3s - loss: 0.1863 - val_loss: 0.8444 - 3s/epoch - 7ms/step
Epoch 4/20
451/451 - 3s - loss: 0.1310 - val_loss: 1.0633 - 3s/epoch - 6ms/step
Epoch 5/20
451/451 - 3s - loss: 0.0993 - val_loss: 1.0770 - 3s/epoch - 7ms/step
Epoch 6/20
451/451 - 3s - loss: 0.0809 - val_loss: 0.9941 - 3s/epoch - 7ms/step
Epoch 7/20
451/451 - 3s - loss: 0.0737 - val_loss: 1.3372 - 3s/epoch - 7ms/step
Epoch 8/20
451/451 - 3s - loss: 0.0978 - val_loss: 0.9655 - 3s/epoch - 7ms/step
Epoch 9/20
451/451 - 3s - loss: 0.2461 - val_loss: 3.0716 - 3s/epoch - 6ms/step
Epoch 10/20
451/451 - 3s - loss: 0.4367 - val_loss: 6.2774 - 3s/epoch - 6ms/step
Epoch 11/20
451/451 - 3s - loss: 0.8453 - val_loss: 21.2237 - 3s/epoch - 6ms/step
Epoch 12/20
451/451 - 3s - loss: 1.3331 - val_loss: 34.7938 - 3s/epoch - 6ms/step
Epoch 13/20
451/451 - 3s - loss: 0.7451 - val_loss: 54.1062 - 3s/epoch - 6ms/step
Epoch 14/20
451/451 - 3s - loss: 1.0797 - val_loss: 82.9386 - 3s/epoch - 6ms/step
Epoch 15/20
451/451 - 3s - loss: 1.4240 - val_loss: 101.5648 - 3s/epoch - 6ms/step
Epoch 16/20
451/451 - 3s - loss: 1.7040 - val_loss: 212.4915 - 3s/epoch - 6ms/step
Epoch 17/20
451/451 - 3s - loss: 1.5936 - val_loss: 225.9463 - 3s/epoch - 6ms/step
Epoch 18/20
451/451 - 3s - loss: 2.6989 - val_loss: 299.2869 - 3s/epoch - 6ms/step
Epoch 19/20
451/451 - 3s - loss: 2.1928 - val_loss: 403.6202 - 3s/epoch - 6ms/step
Epoch 20/20
451/451 - 3s - loss: 4.2743 - val_loss: 442.2154 - 3s/epoch - 6ms/step

Run completed: runs/2024-05-06T15-04-40Z

Runs

runs=runs[order(runs$metric_val_loss),]
runs
Data frame: 20 x 23 
# ... with 10 more rows
# ... with 20 more columns:
#   flag_nodes, flag_batch_size, flag_activation, flag_learning_rate, flag_dropout, epochs, epochs_completed, metrics, model, loss_function,
#   optimizer, learning_rate, script, start, end, completed, output, source_code, context, type
view_run(runs$run_dir[1])
Warning: incomplete final line found on '/var/folders/lw/zymjkl5d1g34b21y_8l475p80000gn/T//Rtmps93sC6/file3d375b21f744/source/carbonEmission.R'
Warning: incomplete final line found on '/var/folders/lw/zymjkl5d1g34b21y_8l475p80000gn/T//Rtmps93sC6/file3d375b21f744/source/CarbonEmission.R'
dim(carbonTrainingFinal)
[1] 8001   71
dim(carbonValidationFinal)
[1] 799  71
carbonTrainingFinal<-rbind(carbonTrainingFinal,carbonValidationFinal)
carbonTrainingLabels<-c(carbonTrainingLabels,carbonValidationLabels)
dim(carbonTrainingFinal)
[1] 8800   71
BestModel<-keras_model_sequential()%>%
  layer_dense(units = 64,activation = "relu",input_shape = dim(carbonTrainingFinal)[2])%>%
  layer_dropout(rate=0.1)%>%
  layer_dense(units = 64,activation = "relu")%>%
  layer_dropout(rate=0.1)%>%
  layer_dense(units = 64,activation = "relu")%>%
  layer_dropout(rate=0.1)%>%
  layer_dense(units = 1)

BestModel %>% compile(
  loss="mse",
  optimizer=optimizer_adam(lr=0.001)
)
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
WARNING:absl:There is a known slowdown when using v2.11+ Keras optimizers on M1/M2 Macs. Falling back to the legacy Keras optimizer, i.e., `tf.keras.optimizers.legacy.Adam`.
history<-BestModel %>% fit(as.matrix(carbonTrainingFinal),
                       carbonTrainingLabels,
                       batch_size=128,
                       epochs=20,
                       validation_data=list(as.matrix(carbonTestingFinal),carbonTestingLabels)
                         )
Epoch 1/20
2024-05-06 10:06:49.315203: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.

2024-05-06 10:06:50.013372: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:114] Plugin optimizer for device_type GPU is enabled.
69/69 [==============================] - 2s 16ms/step - loss: 12.8761 - val_loss: 0.4872
Epoch 2/20
69/69 [==============================] - 1s 7ms/step - loss: 1.7630 - val_loss: 0.3016
Epoch 3/20
69/69 [==============================] - 0s 7ms/step - loss: 1.4701 - val_loss: 0.1979
Epoch 4/20
69/69 [==============================] - 0s 7ms/step - loss: 1.3209 - val_loss: 0.1723
Epoch 5/20
69/69 [==============================] - 0s 7ms/step - loss: 1.1474 - val_loss: 0.1564
Epoch 6/20
69/69 [==============================] - 0s 7ms/step - loss: 1.0502 - val_loss: 0.1713
Epoch 7/20
69/69 [==============================] - 1s 8ms/step - loss: 0.9343 - val_loss: 0.1433
Epoch 8/20
69/69 [==============================] - 0s 7ms/step - loss: 0.8582 - val_loss: 0.1558
Epoch 9/20
69/69 [==============================] - 0s 7ms/step - loss: 0.7804 - val_loss: 0.1368
Epoch 10/20
69/69 [==============================] - 1s 8ms/step - loss: 0.7436 - val_loss: 0.1731
Epoch 11/20
69/69 [==============================] - 1s 7ms/step - loss: 0.6818 - val_loss: 0.1619
Epoch 12/20
69/69 [==============================] - 0s 7ms/step - loss: 0.6363 - val_loss: 0.1585
Epoch 13/20
69/69 [==============================] - 0s 7ms/step - loss: 0.6141 - val_loss: 0.1638
Epoch 14/20
69/69 [==============================] - 0s 7ms/step - loss: 0.5608 - val_loss: 0.1593
Epoch 15/20
69/69 [==============================] - 0s 7ms/step - loss: 0.4981 - val_loss: 0.1620
Epoch 16/20
69/69 [==============================] - 0s 7ms/step - loss: 0.4638 - val_loss: 0.2482
Epoch 17/20
69/69 [==============================] - 1s 8ms/step - loss: 0.4257 - val_loss: 0.2002
Epoch 18/20
69/69 [==============================] - 1s 8ms/step - loss: 0.3816 - val_loss: 0.2666
Epoch 19/20
69/69 [==============================] - 1s 8ms/step - loss: 0.3519 - val_loss: 0.2001
Epoch 20/20
69/69 [==============================] - 1s 8ms/step - loss: 0.3350 - val_loss: 0.2561
predictBestModel<-model %>% predict(as.matrix(carbonTestingFinal))

63/63 [==============================] - 0s 1ms/step
rmse=function(x,y){
  return((mean(x-y)^2)^0.5)
}

rmse(predictBestModel,carbonTestingLabels)
[1] 0.2110004
MAE(predictBestModel,carbonTestingLabels)
[1] 0.2486037
rsquaredBest<-sum((predictBestModel-carbonTestingLabels)^2)/sum((carbonTestingLabels-mean(carbonTestingLabels))^2)
rsquaredBest
[1] 0.473026
---
title: "Project"
output: html_notebook
---
# Loading the data
```{r}
carbonData<-read.csv('/Users/angadsingh/Downloads/Carbon Emission.csv')
summary(carbonData)
```

```{r}
str(carbonData)
```
From the str() of the carbon data I can see that some Vehicle.Type values are empty strings (""), so I will give those rows explicit labels instead of leaving them blank.
```{r}

carbonData$Vehicle.Type[carbonData$Transport=='public'|carbonData$Transport=='walk/bicycle']<-'FuelEfficient'
#carbonData<- carbonData %>% mutate(Vehicle.Type=ifelse(Vehicle.Type=="","No vehicle",Vehicle.Type))
str(carbonData)
```
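
The commented-out `mutate()` line above handles the leftover case directly: any `Vehicle.Type` still recorded as `""` gets an explicit "No vehicle" level. A minimal base-R sketch of that same replacement:

```{r}
# Give any remaining blank Vehicle.Type entries an explicit level, as the
# commented-out mutate() above would; table() confirms no "" level is left.
carbonData$Vehicle.Type[carbonData$Vehicle.Type == ""] <- "No vehicle"
table(carbonData$Vehicle.Type)
```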
```{r}
#carbonData[carbonData == ""]<-NA
colSums(is.na(carbonData))
```
```{r}
library(tidyverse)
parseList<-function(x){
  str_remove_all(x,"\\[|\\]|'")%>%
    strsplit(", ")%>%
    unlist()
}
carbonData$Recycling<-sapply(carbonData$Recycling,parseList)
carbonData$Cooking_With<-sapply(carbonData$Cooking_With,parseList)

carbonData$Recycling<-sapply(carbonData$Recycling,paste,collapse=",")
carbonData$Cooking_With<-sapply(carbonData$Cooking_With,paste,collapse=",")

#str(carbonData)

dummies<-function(col){

  items<-unlist(str_split(col,","))
  items<-trimws(items)
  items<-items[items != ""]
  
  uniqueItems<-unique(items)
  dummyDataFrame<-data.frame(matrix(0,nrow = length(col),ncol = length(uniqueItems)))
  colnames(dummyDataFrame)<-uniqueItems
  
  for (i in seq_along(col)) {
    rowItems<-unlist(str_split(col[i],","))%>%
    map_chr(~str_trim(.))%>%
    discard(~.=="")
    
    rowItems<-rowItems[rowItems %in% uniqueItems]
    dummyDataFrame[i,rowItems]<-1
  }
  return(dummyDataFrame)
}

recyclingDummies<-dummies(carbonData$Recycling)
cookingDummies<-dummies(carbonData$Cooking_With)

carbonData<-cbind(carbonData,recyclingDummies,cookingDummies)

carbonData$Recycling<- NULL
carbonData$Cooking_With<-NULL

str(carbonData)
```
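
A toy call makes the two helpers concrete; the bracketed, quoted string below is an assumed example of the raw format `parseList()` is written to strip:

```{r}
# parseList() removes the brackets/quotes and splits on ", ":
parseList("['Paper', 'Plastic']")   # -> c("Paper", "Plastic")

# dummies() turns comma-joined item strings into one 0/1 column per item:
dummies(c("Paper,Plastic", "Metal"))
```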

```{r}
carbonData<-carbonData %>%
  mutate_if(is.character, as.factor)%>%
  mutate_if(is.integer, as.numeric)

str(carbonData)
summary(carbonData)
```

```{r}
table(carbonData$Body.Type)
table(carbonData$Sex)
table(carbonData$Diet)
table(carbonData$How.Often.Shower)
table(carbonData$Heating.Energy.Source)
table(carbonData$Transport)
table(carbonData$Social.Activity)
table(carbonData$Frequency.of.Traveling.by.Air)
table(carbonData$Waste.Bag.Size)
table(carbonData$Energy.efficiency)
```


```{r}
hist(carbonData$CarbonEmission)
carbonData$CarbonEmission<-log(carbonData$CarbonEmission) # the target is right-skewed, so model it on the log scale
hist(carbonData$CarbonEmission)
```
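
The raw target is strongly right-skewed and the log transform evens it out, so every RMSE/MAE reported from here on is in log units. A quick sketch of the two scales side by side (exp() undoes the transform):

```{r}
summary(carbonData$CarbonEmission)      # log scale used for modeling
summary(exp(carbonData$CarbonEmission)) # back on the original scale
```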

```{r}
carbonIndices<-which(names(carbonData)=='CarbonEmission')
for (c in colnames(carbonData[,-carbonIndices])) {
  if(is.factor(carbonData[,c])){
    try({
        anovaResult<-aov(carbonData$CarbonEmission~carbonData[,c])
        cat("ANOVA of ",c, "and CarbonEmission", "\n")
        print(summary(anovaResult))
        boxplot(carbonData$CarbonEmission~carbonData[,c], main = paste("Carbon Emission vs", c), xlab = c, ylab = "CarbonEmission", col="lightgreen")
        
      })
  }
  else if (is.numeric(carbonData[,c])){
    try({
      corTest<-cor.test(carbonData$CarbonEmission,carbonData[,c], method = "pearson")
      cat("p.value of ",c, "and Carbon Emission", corTest$p.value, "\n")
      plot(carbonData$CarbonEmission,carbonData[,c], main = paste("Carbon Emission vs", c), xlab ="Carbon Emission", ylab=c)
    })
  }
  
}
```

```{r}
library(caret)

carbonDataIndexs <- createDataPartition(carbonData$CarbonEmission, p=0.8, list=FALSE)

carbonTrainData<-carbonData[carbonDataIndexs,]
carbonTrainData

carbonTestData<-carbonData[-carbonDataIndexs,]
carbonTestData

carbonTestLabels<-carbonTestData$CarbonEmission
```
```{r}
knnModel<-train(CarbonEmission~.,data = carbonTrainData, method="knn", trControl=trainControl(method = "cv", number=5))
```
```{r}
knnModel
```
```{r}
knnPred<-predict(knnModel,newdata = carbonTestData)

rmse=function(x,y){
  return(sqrt(mean((x-y)^2))) # square the errors before averaging, then take the root
}
rmse(knnPred,carbonTestLabels)
```
```{r}
lmModel<-train(CarbonEmission~.,data = carbonTrainData, method="lm", trControl=trainControl(method = "cv", number=5))
lmModel
```
```{r}
summary(lmModel)
```
```{r}
stepwiseModel<-train(CarbonEmission~.,data = carbonTrainData, method="leapBackward", trControl=trainControl(method = "cv", number=5))
stepwiseModel
```
```{r}
summary(stepwiseModel$finalModel)
```
```{r}
colSums(is.na(carbonTrainData))
```


# Lasso Model
```{r}
library(glmnet)
set.seed(1)
lassoModel<-train(CarbonEmission~.,data = carbonTrainData,method="glmnet",trControl= trainControl(method = "cv", number=5), tuneGrid = expand.grid(alpha=1, lambda=10^seq(-3,3,length=100))) 

lassoModel


lassoLambda<-lassoModel$bestTune$lambda
lassoX<-model.matrix(CarbonEmission~.,carbonTrainData)[,-1] # numeric design matrix; as.matrix() would coerce the factor columns to character, which glmnet cannot use
lassoFinalModel<-glmnet(lassoX,carbonTrainData$CarbonEmission,alpha = 1,lambda = lassoLambda, family = "gaussian")

coeff<-coef(lassoFinalModel)
coeff

zeroCoeff<-coeff==0
zeroCoeff
```
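
To read off which predictors the lasso actually keeps, the nonzero rows of `coeff` can be extracted; a small sketch using a dense copy of the coefficient matrix:

```{r}
cm<-as.matrix(coeff)                   # dense copy of the sparse coefficient matrix
keptPredictors<-rownames(cm)[cm[,1] != 0]
setdiff(keptPredictors,"(Intercept)")  # predictors surviving the lasso penalty
```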
```{r}
plot(lassoModel)
```


# Ridge Model
```{r}
set.seed(1)
ridgeModel<-train(CarbonEmission~.,data = carbonTrainData,method="glmnet",trControl= trainControl(method = "cv", number=5), tuneGrid = expand.grid(alpha=0, lambda=10^seq(-3,3,length=100))) 

ridgeModel

ridgeLambda<-ridgeModel$bestTune$lambda
ridgeX<-model.matrix(CarbonEmission~.,carbonTrainData)[,-1] # numeric design matrix; factors are expanded to dummies
ridgeFinalModel<-glmnet(ridgeX,carbonTrainData$CarbonEmission,alpha = 0,lambda = ridgeLambda, family = "gaussian") # alpha = 0 is the ridge penalty (alpha = 1 would refit the lasso)

ridgeFinalModel
```
```{r}
plot(ridgeModel)
```
```{r}
set.seed(1)
enetModel<-train(CarbonEmission~., data = carbonTrainData, method = "glmnet", preProcess = "nzv", trControl = trainControl(method = "cv", number = 5), tuneGrid = expand.grid(alpha = seq(0, 1, length = 10), lambda = 10^seq(-3, 1, length = 100))) # preProcess is an argument of train(), not trainControl()

enetModel

enetModel$bestTune
```
```{r}
enetLambda<-enetModel$bestTune$lambda
enetAlpha<-enetModel$bestTune$alpha

enetX<-model.matrix(CarbonEmission~.,carbonTrainData)[,-1] # numeric design matrix for glmnet; factors become dummies

enetFinalModel<-glmnet(enetX,carbonTrainData$CarbonEmission, alpha = enetAlpha,lambda = enetLambda, family = "gaussian")

enetFinalModel
```
# Random Forest Model
```{r}
library(randomForest)
set.seed(1)
randomForestModel<-randomForest(CarbonEmission~.,data = carbonTrainData)
randomForestModel
```
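
randomForest ships its own importance plot, which gives a quick visual read on which inputs drive the fit:

```{r}
# IncNodePurity importance from the randomForest fit above; larger values
# mean the variable contributes more to reducing node impurity.
varImpPlot(randomForestModel)
```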



```{r}
mRf<-train(CarbonEmission~.,
           data=carbonTrainData,
           method="rf",
           trControl=trainControl(method = "cv", number =5)
           )
```

```{r}
mRf
varImp(mRf)
```
```{r}
rfPred<-predict(mRf,newdata = carbonTestData)

MAE(carbonTestData$CarbonEmission,rfPred)
rmse(carbonTestData$CarbonEmission,rfPred)
cor(carbonTestData$CarbonEmission,rfPred)^2
```

```{r}
plot(carbonTestData$CarbonEmission,rfPred)
```
# GBM
```{r}
set.seed(1)

grBoostedTree<-train(
  CarbonEmission~.,
  data = carbonTrainData,
  method="gbm",
  trControl=trainControl(method = "cv",number = 5)
)
```
```{r}
grBoostedTree

gbmPred<-predict(grBoostedTree, carbonTestData)
```
```{r}
MAE(carbonTestData$CarbonEmission,gbmPred)
rmse(carbonTestData$CarbonEmission,gbmPred)
cor(carbonTestData$CarbonEmission,gbmPred)^2
```
```{r}
plot(carbonTestData$CarbonEmission,gbmPred)
```
# SVM Linear Model
```{r}
set.seed(1)

svmLinear<-train(
  CarbonEmission~.,
  data = carbonTrainData,
  method="svmLinear",
  trControl=trainControl(method = "cv",number = 5, preProc=c("center","scale"))
)
```
```{r}
svmLinear

svmPred<-predict(svmLinear,carbonTestData)

plot(svmPred,carbonTestData$CarbonEmission)
```
# SVM Radial Model
```{r}
set.seed(1)

svmRadial<-train(
  CarbonEmission~.,
  data = carbonTrainData,
  method="svmRadial",
  trControl=trainControl(method = "cv",number = 5, preProc=c("center","scale"))
)
```

```{r}
svmRadial

svmRadialPred<-predict(svmRadial,carbonTestData)

plot(svmRadialPred,carbonTestData$CarbonEmission)
```
# Comparing models
```{r}
compare=resamples(list(KNN=knnModel,LIN=lmModel,stepWise=stepwiseModel,Lasso=lassoModel,Ridge=ridgeModel,Enet=enetModel,RF=mRf,GBM=grBoostedTree,SVML=svmLinear,SVMR=svmRadial))
summary(compare) # Out of all the models SVM Radial stands out the most
```
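
`resamples()` keeps the per-fold metrics, so the same comparison can also be read off a plot; caret provides lattice methods for resamples objects:

```{r}
bwplot(compare)                   # box-and-whisker of all resampled metrics
dotplot(compare, metric = "RMSE") # RMSE only, with confidence intervals
```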

# Neural Network Preprocessing
```{r}
library(caret)
carbonInd<-createDataPartition(carbonTrainData$CarbonEmission,p=0.9,list = FALSE)
carbonIndex<-which(names(carbonTrainData)=='CarbonEmission')

carbonTrainingData<-carbonTrainData[carbonInd,-carbonIndex]
str(carbonTrainingData)

carbonTrainingLabels<-carbonTrainData[carbonInd,carbonIndex]
str(carbonTrainingLabels)

carbonValidationData<-carbonTrainData[-carbonInd,-carbonIndex]
carbonValidationData

carbonValidationLabels<-carbonTrainData[-carbonInd,carbonIndex]
str(carbonValidationLabels)

carbonTestingData<-carbonTestData[,-carbonIndex]
carbonTestingData

carbonTestingLabels<-carbonTestData[,carbonIndex]
str(carbonTestingLabels)
```
```{r}
dim(carbonTrainingData)
dim(carbonTestingData)
```


# Scaling numeric variables and one-hot encoding categorical variables
```{r}
library(mltools)
library(data.table)
numericCols<-c("Monthly.Grocery.Bill","Vehicle.Monthly.Distance.Km","Waste.Bag.Weekly.Count",
               "How.Long.TV.PC.Daily.Hour","How.Many.New.Clothes.Monthly","How.Long.Internet.Daily.Hour","Metal","Paper","Plastic","Glass","Stove","Oven"
               ,"Microwave","Grill","Airfryer")

categoricalCols<-c("Body.Type","Sex","Diet","How.Often.Shower","Heating.Energy.Source","Transport","Vehicle.Type","Social.Activity",
                   "Frequency.of.Traveling.by.Air","Waste.Bag.Size","Energy.efficiency")

carbonTrainingDataNew<-scale(carbonTrainingData[,numericCols])
colMeanTrain<-attr(carbonTrainingDataNew,"scaled:center")
colStddevsTrain<-attr(carbonTrainingDataNew,"scaled:scale")


carbonTrainingData[,numericCols]<-carbonTrainingDataNew
carbonValidationData[,numericCols]<-scale(carbonValidationData[,numericCols],center = colMeanTrain,scale = colStddevsTrain)
carbonTestingData[,numericCols]<-scale(carbonTestingData[,numericCols],center = colMeanTrain,scale = colStddevsTrain)

carbonTrainingTable<-as.data.table(carbonTrainingData)
carbonValidationTable<-as.data.table(carbonValidationData)
carbonTestingTable<-as.data.table(carbonTestingData)

carbonTrainingOneHot<-one_hot(carbonTrainingTable,naCols=FALSE,dropCols=TRUE,dropUnusedLevels=TRUE)
carbonTrainingOneHot

carbonValidationOneHot<-one_hot(carbonValidationTable,naCols=FALSE,dropCols=TRUE,dropUnusedLevels=TRUE)
carbonValidationOneHot

carbonTestingOneHot<-one_hot(carbonTestingTable,naCols=FALSE,dropCols=TRUE,dropUnusedLevels=TRUE)
carbonTestingOneHot

carbonTrainingFinal<-as.data.frame(cbind(carbonTrainingTable[, ..numericCols], carbonTrainingOneHot))
carbonTrainingFinal

carbonValidationFinal<-as.data.frame(cbind(carbonValidationTable[, ..numericCols], carbonValidationOneHot))
carbonValidationFinal

carbonTestingFinal<-as.data.frame(cbind(carbonTestingTable[, ..numericCols], carbonTestingOneHot))
carbonTestingFinal
```
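
One-hot encoding each split separately only lines up if every factor level occurs in every split; a quick sanity check (a sketch) that the three design matrices share the same columns before anything is fed to keras:

```{r}
# The network needs identical columns, in identical order, across splits.
stopifnot(identical(names(carbonTrainingFinal), names(carbonValidationFinal)),
          identical(names(carbonTrainingFinal), names(carbonTestingFinal)))
```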
```{r}
library(keras)

model<-keras_model_sequential()%>%
  layer_dense(units = 32,activation = "relu",input_shape = dim(carbonTrainingFinal)[2])%>%
  layer_dropout(rate=0.3)%>%
  layer_dense(units = 32,activation = "relu")%>%
  layer_dropout(rate=0.3)%>%
  layer_dense(units = 16,activation = "relu")%>%
  layer_dropout(rate=0.3)%>%
  layer_dense(units = 1)

model %>% compile(
  loss="mse",
  optimizer=optimizer_adam(learning_rate=0.001) # `lr` is deprecated in recent Keras releases
)

history<-model %>% fit(as.matrix(carbonTrainingFinal),
                       carbonTrainingLabels,
                       batch_size=50,
                       epochs=20,
                       validation_data=list(as.matrix(carbonValidationFinal),carbonValidationLabels)
                         )
```
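
The returned history can be plotted to eyeball over-fitting before launching the tuning runs below:

```{r}
plot(history) # training vs. validation loss curves; a widening gap signals over-fitting
```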
```{r}
kerasPrediction<-model %>% predict(as.matrix(carbonTestingFinal))

rmse=function(x,y){
  return(sqrt(mean((x-y)^2))) # RMSE: root of the mean squared error
}

rmse(kerasPrediction,carbonTestLabels)
MAE(kerasPrediction,carbonTestLabels)
rsquared<-1-sum((kerasPrediction-carbonTestLabels)^2)/sum((carbonTestLabels-mean(carbonTestLabels))^2) # R^2 = 1 - SSE/SST
rsquared
```
```{r}
library(tfruns)
runs<-tuning_run(
  "carbonEmission.R",
  flags=list(
    learning_rate=c(0.1,0.5,0.01,0.001),
    nodes=c(8,16,32,64,128),
    batch_size=c(16,32,64,128),
    dropout=c(0.1,0.2,0.3,0.4,0.5),
    activation=c("relu")
  ),sample=0.05
)
```
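
`tuning_run()` sources `carbonEmission.R` once per sampled flag combination. The script is not reproduced in this notebook and its echoes in the transcript are truncated, but from those echoes it has roughly the following shape; this is a reconstruction, not the verbatim file (the hidden-layer count and the `learning_rate`/`dropout` defaults are guesses):

```{r, eval=FALSE}
# Reconstructed sketch of carbonEmission.R, inferred from the truncated
# echoes in the tuning transcript; not the verbatim script.
FLAGS<-flags(
  flag_numeric("nodes", 32),
  flag_numeric("batch_size", 32),
  flag_string("activation", "relu"),
  flag_numeric("learning_rate", 0.001),
  flag_numeric("dropout", 0.3)
)

model<-keras_model_sequential()%>%
  layer_dense(units = FLAGS$nodes, activation = FLAGS$activation,
              input_shape = dim(carbonTrainingFinal)[2])%>%
  layer_dropout(rate = FLAGS$dropout)%>%
  layer_dense(units = 1)

model %>% compile(
  loss = "mse",
  optimizer = optimizer_adam(learning_rate = FLAGS$learning_rate)
)

model %>% fit(
  as.matrix(carbonTrainingFinal),
  carbonTrainingLabels,
  batch_size = FLAGS$batch_size,
  epochs = 20,
  validation_data = list(as.matrix(carbonValidationFinal), carbonValidationLabels)
)
```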

# Runs
```{r}
runs=runs[order(runs$metric_val_loss),]
runs
view_run(runs$run_dir[1])
```
```{r}
dim(carbonTrainingFinal)
dim(carbonValidationFinal)
carbonTrainingFinal<-rbind(carbonTrainingFinal,carbonValidationFinal)
carbonTrainingLabels<-c(carbonTrainingLabels,carbonValidationLabels)
dim(carbonTrainingFinal)
```


```{r}
BestModel<-keras_model_sequential()%>%
  layer_dense(units = 64,activation = "relu",input_shape = dim(carbonTrainingFinal)[2])%>%
  layer_dropout(rate=0.1)%>%
  layer_dense(units = 64,activation = "relu")%>%
  layer_dropout(rate=0.1)%>%
  layer_dense(units = 64,activation = "relu")%>%
  layer_dropout(rate=0.1)%>%
  layer_dense(units = 1)

BestModel %>% compile(
  loss="mse",
  optimizer=optimizer_adam(learning_rate=0.001)
)

history<-BestModel %>% fit(as.matrix(carbonTrainingFinal),
                       carbonTrainingLabels,
                       batch_size=128,
                       epochs=20,
                       validation_data=list(as.matrix(carbonTestingFinal),carbonTestingLabels)
                         )
```
```{r}
predictBestModel<-BestModel %>% predict(as.matrix(carbonTestingFinal)) # use the retrained BestModel, not the earlier base model
```
```{r}
rmse=function(x,y){
  return(sqrt(mean((x-y)^2)))
}

rmse(predictBestModel,carbonTestingLabels)
MAE(predictBestModel,carbonTestingLabels)
rsquaredBest<-1-sum((predictBestModel-carbonTestingLabels)^2)/sum((carbonTestingLabels-mean(carbonTestingLabels))^2) # R^2 = 1 - SSE/SST
rsquaredBest
```
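
All of these numbers are on the log scale introduced when the target was transformed; for reporting, the predictions can be mapped back with exp():

```{r}
originalScalePred<-exp(predictBestModel) # undo the earlier log transform
head(originalScalePred)
```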

